perf/x86: Sync branch stack sampling with precise_sampling
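
For reference, cpuc->lbr_entries[] holds struct perf_branch_entry records, so the old opaque flags field gives way to the generic mispred/predicted/reserved bits. A minimal sketch of the layout assumed here (the authoritative definition lives in include/linux/perf_event.h):

	struct perf_branch_entry {
		__u64	from;		/* branch source address */
		__u64	to;		/* branch target address */
		__u64	mispred:1,	/* branch target was mispredicted */
			predicted:1,	/* branch target was predicted */
			reserved:62;	/* zeroed by the producer */
	};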
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 3fab3de3ce96dde8b3bece9bdf65cae8c5b1e466..6710a5116ebd2c1cbabbb340a1d3aad0cc68bdc5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -72,8 +72,6 @@ void intel_pmu_lbr_enable(struct perf_event *event)
        if (!x86_pmu.lbr_nr)
                return;
 
-       WARN_ON_ONCE(cpuc->enabled);
-
        /*
         * Reset the LBR stack if we changed task context to
         * avoid data leaks.
@@ -144,9 +142,11 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
 
                rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
 
-               cpuc->lbr_entries[i].from  = msr_lastbranch.from;
-               cpuc->lbr_entries[i].to    = msr_lastbranch.to;
-               cpuc->lbr_entries[i].flags = 0;
+               cpuc->lbr_entries[i].from       = msr_lastbranch.from;
+               cpuc->lbr_entries[i].to         = msr_lastbranch.to;
+               cpuc->lbr_entries[i].mispred    = 0;
+               cpuc->lbr_entries[i].predicted  = 0;
+               cpuc->lbr_entries[i].reserved   = 0;
        }
        cpuc->lbr_stack.nr = i;
 }
@@ -167,19 +167,22 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 
        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                unsigned long lbr_idx = (tos - i) & mask;
-               u64 from, to, flags = 0;
+               u64 from, to, mis = 0, pred = 0;
 
                rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
                rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);
 
                if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
-                       flags = !!(from & LBR_FROM_FLAG_MISPRED);
+                       mis = !!(from & LBR_FROM_FLAG_MISPRED);
+                       pred = !mis;
                        from = (u64)((((s64)from) << 1) >> 1);
                }
 
-               cpuc->lbr_entries[i].from  = from;
-               cpuc->lbr_entries[i].to    = to;
-               cpuc->lbr_entries[i].flags = flags;
+               cpuc->lbr_entries[i].from       = from;
+               cpuc->lbr_entries[i].to         = to;
+               cpuc->lbr_entries[i].mispred    = mis;
+               cpuc->lbr_entries[i].predicted  = pred;
+               cpuc->lbr_entries[i].reserved   = 0;
        }
        cpuc->lbr_stack.nr = i;
 }
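
In the LBR_FORMAT_EIP_FLAGS case above, the hardware stores the mispredict flag in the top bit of the FROM value; the shift pair strips it and sign-extends the remaining address back to canonical form. A standalone sketch of that decode, assuming LBR_FROM_FLAG_MISPRED is bit 63 (decode_lbr_from is a hypothetical helper, not part of the patch):

	#include <stdint.h>

	#define LBR_FROM_FLAG_MISPRED	(1ULL << 63)

	/* Split a raw LBR FROM value into its address and mispredict flag. */
	static void decode_lbr_from(uint64_t raw, uint64_t *from, int *mispred)
	{
		*mispred = !!(raw & LBR_FROM_FLAG_MISPRED);
		/* Shift bit 63 out, then arithmetic-shift back so bit 62 is
		 * replicated into bit 63, restoring a canonical address. */
		*from = (uint64_t)((((int64_t)raw) << 1) >> 1);
	}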
@@ -200,23 +203,23 @@ void intel_pmu_lbr_read(void)
 void intel_pmu_lbr_init_core(void)
 {
        x86_pmu.lbr_nr     = 4;
-       x86_pmu.lbr_tos    = 0x01c9;
-       x86_pmu.lbr_from   = 0x40;
-       x86_pmu.lbr_to     = 0x60;
+       x86_pmu.lbr_tos    = MSR_LBR_TOS;
+       x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
+       x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
 }
 
 void intel_pmu_lbr_init_nhm(void)
 {
        x86_pmu.lbr_nr     = 16;
-       x86_pmu.lbr_tos    = 0x01c9;
-       x86_pmu.lbr_from   = 0x680;
-       x86_pmu.lbr_to     = 0x6c0;
+       x86_pmu.lbr_tos    = MSR_LBR_TOS;
+       x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
+       x86_pmu.lbr_to     = MSR_LBR_NHM_TO;
 }
 
 void intel_pmu_lbr_init_atom(void)
 {
        x86_pmu.lbr_nr     = 8;
-       x86_pmu.lbr_tos    = 0x01c9;
-       x86_pmu.lbr_from   = 0x40;
-       x86_pmu.lbr_to     = 0x60;
+       x86_pmu.lbr_tos    = MSR_LBR_TOS;
+       x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
+       x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
 }
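
The magic MSR numbers above are replaced with named constants whose values match the literals being removed; a sketch of the definitions (presumably in arch/x86/include/asm/msr-index.h):

	#define MSR_LBR_TOS		0x000001c9
	#define MSR_LBR_NHM_FROM	0x00000680
	#define MSR_LBR_NHM_TO		0x000006c0
	#define MSR_LBR_CORE_FROM	0x00000040
	#define MSR_LBR_CORE_TO		0x00000060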