perf/x86/intel: Reduce lbr_sel_map[] size
author     Yan, Zheng <zheng.z.yan@intel.com>
           Wed, 5 Nov 2014 02:55:57 +0000 (21:55 -0500)
committer  Ingo Molnar <mingo@kernel.org>
           Wed, 18 Feb 2015 16:16:01 +0000 (17:16 +0100)
The index into lbr_sel_map[] is the bit value of a perf
branch_sample_type. PERF_SAMPLE_BRANCH_MAX is currently 1 << 11, so
each lbr_sel_map[] of ints occupies 8192 bytes. By using the bit
shift as the index instead, each map shrinks to
PERF_SAMPLE_BRANCH_MAX_SHIFT (11) entries, i.e. 44 bytes. This patch
defines a bit-shift enum for the branch types and uses those shifts
to define the lbr_sel_map[] arrays.
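
As a standalone sketch of the size arithmetic (the constant values
mirror this patch, but the macro and array names are illustrative
only, not the kernel's):

    #include <stdio.h>

    /* COND is the last branch type at bit 10, so MAX is 1 << 11. */
    #define SAMPLE_BRANCH_MAX        (1U << 11)
    #define SAMPLE_BRANCH_MAX_SHIFT  11

    static const int map_by_bit_value[SAMPLE_BRANCH_MAX];       /* old scheme */
    static const int map_by_bit_shift[SAMPLE_BRANCH_MAX_SHIFT]; /* new scheme */

    int main(void)
    {
            /* 2048 * sizeof(int) = 8192 bytes vs. 11 * sizeof(int) = 44 bytes */
            printf("indexed by bit value: %zu bytes\n", sizeof(map_by_bit_value));
            printf("indexed by bit shift: %zu bytes\n", sizeof(map_by_bit_shift));
            return 0;
    }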

Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Stephane Eranian <eranian@google.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: jolsa@redhat.com
Cc: linux-api@vger.kernel.org
Link: http://lkml.kernel.org/r/1415156173-10035-2-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel_lbr.c
include/uapi/linux/perf_event.h

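For context, the user-visible flag values are unchanged; the patch
only adds shift names and re-expresses the existing flags in terms of
them. A compile-time check of that invariant, assuming a uapi header
that already contains this patch:

    #include <linux/perf_event.h>

    /* Each pre-existing flag must still equal 1U << its new shift name. */
    _Static_assert(PERF_SAMPLE_BRANCH_USER == (1U << PERF_SAMPLE_BRANCH_USER_SHIFT),
                   "USER flag unchanged");
    _Static_assert(PERF_SAMPLE_BRANCH_COND == (1U << PERF_SAMPLE_BRANCH_COND_SHIFT),
                   "COND flag unchanged");
    _Static_assert(PERF_SAMPLE_BRANCH_MAX == (1U << PERF_SAMPLE_BRANCH_MAX_SHIFT),
                   "MAX sentinel unchanged");

    int main(void) { return 0; }
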
arch/x86/kernel/cpu/perf_event.h
index df525d2be1e814766ac96a82ee3caf33cc4316e4..0c45b22495dccca073ca7a56ff4f29ae902f64ec 100644
@@ -515,6 +515,10 @@ struct x86_pmu {
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
 };
 
+enum {
+       PERF_SAMPLE_BRANCH_SELECT_MAP_SIZE = PERF_SAMPLE_BRANCH_MAX_SHIFT,
+};
+
 #define x86_add_quirk(func_)                                           \
 do {                                                                   \
        static struct x86_pmu_quirk __quirk __initdata = {              \
arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 58f1a94beaf09f6fdab1dd1b12727ad045a58913..8bc078f43a8218e0102fc3aafae5d29340f7a872 100644
@@ -69,10 +69,6 @@ static enum {
 #define LBR_FROM_FLAG_IN_TX    (1ULL << 62)
 #define LBR_FROM_FLAG_ABORT    (1ULL << 61)
 
-#define for_each_branch_sample_type(x) \
-       for ((x) = PERF_SAMPLE_BRANCH_USER; \
-            (x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1)
-
 /*
  * x86 control flow change classification
  * x86 control flow changes include branches, interrupts, traps, faults
@@ -403,14 +399,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
 {
        struct hw_perf_event_extra *reg;
        u64 br_type = event->attr.branch_sample_type;
-       u64 mask = 0, m;
-       u64 v;
+       u64 mask = 0, v;
+       int i;
 
-       for_each_branch_sample_type(m) {
-               if (!(br_type & m))
+       for (i = 0; i < PERF_SAMPLE_BRANCH_SELECT_MAP_SIZE; i++) {
+               if (!(br_type & (1ULL << i)))
                        continue;
 
-               v = x86_pmu.lbr_sel_map[m];
+               v = x86_pmu.lbr_sel_map[i];
                if (v == LBR_NOT_SUPP)
                        return -EOPNOTSUPP;
 
@@ -678,35 +674,35 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
 /*
  * Map interface branch filters onto LBR filters
  */
-static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
-       [PERF_SAMPLE_BRANCH_ANY]        = LBR_ANY,
-       [PERF_SAMPLE_BRANCH_USER]       = LBR_USER,
-       [PERF_SAMPLE_BRANCH_KERNEL]     = LBR_KERNEL,
-       [PERF_SAMPLE_BRANCH_HV]         = LBR_IGN,
-       [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_REL_JMP
-                                       | LBR_IND_JMP | LBR_FAR,
+static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_SELECT_MAP_SIZE] = {
+       [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = LBR_ANY,
+       [PERF_SAMPLE_BRANCH_USER_SHIFT]         = LBR_USER,
+       [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = LBR_KERNEL,
+       [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
+       [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = LBR_RETURN | LBR_REL_JMP
+                                               | LBR_IND_JMP | LBR_FAR,
        /*
         * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
         */
-       [PERF_SAMPLE_BRANCH_ANY_CALL] =
+       [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
         LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
        /*
         * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
         */
-       [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP,
-       [PERF_SAMPLE_BRANCH_COND]     = LBR_JCC,
+       [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
+       [PERF_SAMPLE_BRANCH_COND_SHIFT]     = LBR_JCC,
 };
 
-static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
-       [PERF_SAMPLE_BRANCH_ANY]        = LBR_ANY,
-       [PERF_SAMPLE_BRANCH_USER]       = LBR_USER,
-       [PERF_SAMPLE_BRANCH_KERNEL]     = LBR_KERNEL,
-       [PERF_SAMPLE_BRANCH_HV]         = LBR_IGN,
-       [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_FAR,
-       [PERF_SAMPLE_BRANCH_ANY_CALL]   = LBR_REL_CALL | LBR_IND_CALL
-                                       | LBR_FAR,
-       [PERF_SAMPLE_BRANCH_IND_CALL]   = LBR_IND_CALL,
-       [PERF_SAMPLE_BRANCH_COND]       = LBR_JCC,
+static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_SELECT_MAP_SIZE] = {
+       [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = LBR_ANY,
+       [PERF_SAMPLE_BRANCH_USER_SHIFT]         = LBR_USER,
+       [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = LBR_KERNEL,
+       [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
+       [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = LBR_RETURN | LBR_FAR,
+       [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]     = LBR_REL_CALL | LBR_IND_CALL
+                                               | LBR_FAR,
+       [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL,
+       [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
 };
 
 /* core */
include/uapi/linux/perf_event.h
index 9b79abbd1ab80f765eb15b2d563f2c30262327d2..e46b93279e3d5b4fb48ef4bfed44b7b5ea4199d1 100644
@@ -152,21 +152,42 @@ enum perf_event_sample_format {
  * The branch types can be combined, however BRANCH_ANY covers all types
  * of branches and therefore it supersedes all the other types.
  */
+enum perf_branch_sample_type_shift {
+       PERF_SAMPLE_BRANCH_USER_SHIFT           = 0, /* user branches */
+       PERF_SAMPLE_BRANCH_KERNEL_SHIFT         = 1, /* kernel branches */
+       PERF_SAMPLE_BRANCH_HV_SHIFT             = 2, /* hypervisor branches */
+
+       PERF_SAMPLE_BRANCH_ANY_SHIFT            = 3, /* any branch types */
+       PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT       = 4, /* any call branch */
+       PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT     = 5, /* any return branch */
+       PERF_SAMPLE_BRANCH_IND_CALL_SHIFT       = 6, /* indirect calls */
+       PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT       = 7, /* transaction aborts */
+       PERF_SAMPLE_BRANCH_IN_TX_SHIFT          = 8, /* in transaction */
+       PERF_SAMPLE_BRANCH_NO_TX_SHIFT          = 9, /* not in transaction */
+       PERF_SAMPLE_BRANCH_COND_SHIFT           = 10, /* conditional branches */
+
+       PERF_SAMPLE_BRANCH_MAX_SHIFT            /* non-ABI */
+};
+
 enum perf_branch_sample_type {
-       PERF_SAMPLE_BRANCH_USER         = 1U << 0, /* user branches */
-       PERF_SAMPLE_BRANCH_KERNEL       = 1U << 1, /* kernel branches */
-       PERF_SAMPLE_BRANCH_HV           = 1U << 2, /* hypervisor branches */
-
-       PERF_SAMPLE_BRANCH_ANY          = 1U << 3, /* any branch types */
-       PERF_SAMPLE_BRANCH_ANY_CALL     = 1U << 4, /* any call branch */
-       PERF_SAMPLE_BRANCH_ANY_RETURN   = 1U << 5, /* any return branch */
-       PERF_SAMPLE_BRANCH_IND_CALL     = 1U << 6, /* indirect calls */
-       PERF_SAMPLE_BRANCH_ABORT_TX     = 1U << 7, /* transaction aborts */
-       PERF_SAMPLE_BRANCH_IN_TX        = 1U << 8, /* in transaction */
-       PERF_SAMPLE_BRANCH_NO_TX        = 1U << 9, /* not in transaction */
-       PERF_SAMPLE_BRANCH_COND         = 1U << 10, /* conditional branches */
-
-       PERF_SAMPLE_BRANCH_MAX          = 1U << 11, /* non-ABI */
+       PERF_SAMPLE_BRANCH_USER         = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
+       PERF_SAMPLE_BRANCH_KERNEL       = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
+       PERF_SAMPLE_BRANCH_HV           = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
+
+       PERF_SAMPLE_BRANCH_ANY          = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
+       PERF_SAMPLE_BRANCH_ANY_CALL     =
+                               1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
+       PERF_SAMPLE_BRANCH_ANY_RETURN   =
+                               1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
+       PERF_SAMPLE_BRANCH_IND_CALL     =
+                               1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
+       PERF_SAMPLE_BRANCH_ABORT_TX     =
+                               1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
+       PERF_SAMPLE_BRANCH_IN_TX        = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
+       PERF_SAMPLE_BRANCH_NO_TX        = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
+       PERF_SAMPLE_BRANCH_COND         = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
+
+       PERF_SAMPLE_BRANCH_MAX          = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
 };
 
 #define PERF_SAMPLE_BRANCH_PLM_ALL \