perf/x86: Implement PERF_SAMPLE_BRANCH for Intel CPUs
arch/x86/kernel/cpu/perf_event.h
/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		4

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events;
	int			n_added;
	int			n_txn;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	unsigned int		group_flag;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online;
};

#define __EVENT_CONSTRAINT(c, n, m, w, o) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1)
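
/*
 * Illustrative sketch only (the event codes and counter masks are made up
 * for the example and do not describe real hardware): a hypothetical
 * model-specific constraint table mixing an overlapping constraint with
 * ordinary ones.
 *
 *	static struct event_constraint hypothetical_constraints[] = {
 *		EVENT_CONSTRAINT_OVERLAP(0x12, 0x09, ARCH_PERFMON_EVENTSEL_EVENT),
 *		INTEL_EVENT_CONSTRAINT(0x34, 0x07),
 *		INTEL_EVENT_CONSTRAINT(0x56, 0x38),
 *		EVENT_CONSTRAINT_END
 *	};
 *
 * Counter mask 0x09 (counters 0 and 3) is not a subset of 0x07 or 0x38, so
 * the overlap flag tells the scheduler that the first event is worth
 * revisiting if a greedy assignment leaves the other events unschedulable.
 */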

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * Filter mask to validate fixed counter events.
 * The following filters disqualify an event for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

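/*
 * Illustrative use (the event codes and names are assumptions, not
 * definitions from this header): pinning architectural events to their
 * fixed counters in a hypothetical model table.  The (1ULL << (32+n)) in
 * the macro places the constraint bit in the fixed-counter range of
 * idxmsk64, above the general-purpose counters.
 *
 *	static struct event_constraint hypothetical_event_constraints[] = {
 *		FIXED_EVENT_CONSTRAINT(0x00c0, 0),	-- INST_RETIRED.ANY
 *		FIXED_EVENT_CONSTRAINT(0x003c, 1),	-- CPU_CLK_UNHALTED.CORE
 *		EVENT_CONSTRAINT_END
 *	};
 */
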
/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight; (e)++)

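/*
 * Typical traversal (a sketch of how a constraint table is scanned; the
 * surrounding function is hypothetical, the field names are the ones from
 * struct event_constraint above):
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 *	return &unconstrained;
 */
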
/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between the PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i		\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

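/*
 * Illustrative table (the event code and valid mask are examples, not taken
 * from this file): an offcore-response style event whose full request mask
 * lives in an extra MSR.  Like constraint tables, it ends with a sentinel
 * entry.
 *
 *	static struct extra_reg hypothetical_extra_regs[] __read_mostly = {
 *		INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *		EVENT_EXTRA_END
 *	};
 */
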
union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
	};
	u64	capabilities;
};

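/*
 * A minimal sketch of how this union gets populated (assuming the CPU
 * advertises PDCM; MSR_IA32_PERF_CAPABILITIES is the architectural MSR
 * behind these bits):
 *
 *	u64 capabilities;
 *
 *	rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
 *	x86_pmu.intel_cap.capabilities = capabilities;
 */
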
struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
		unsigned long events_maskl;
		unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	int		bts, pebs;
	int		bts_active, pebs_active;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int er_flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)

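/*
 * Illustrative use (the quirk function name is hypothetical): model setup
 * code can chain a one-shot fixup that the core runs once the PMU has been
 * probed, e.g.
 *
 *	static __init void hypothetical_model_quirk(void)
 *	{
 *		x86_pmu.pebs = 0;	(for example, disable a broken feature)
 *	}
 *	...
 *	x86_add_quirk(hypothetical_model_quirk);
 */
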
#define ERF_NO_HT_SHARING	1
#define ERF_HAS_RSP_1		2

extern struct x86_pmu x86_pmu __read_mostly;

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

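/*
 * Illustrative indexing (the raw event values are made up for the example):
 * model setup code fills one cell per (cache, op, result) combination, and
 * readers index the table the same way, with 0 and -1 keeping the special
 * meanings described above.
 *
 *	hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = 0x010b;
 *	hw_cache_event_ids[C(NODE)][C(OP_WRITE)][C(RESULT_MISS)] = -1;
 */
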
u64 x86_perf_event_update(struct perf_event *event);

static inline int x86_pmu_addr_offset(int index)
{
	int offset;

	/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
	alternative_io(ASM_NOP2,
		       "shll $1, %%eax",
		       X86_FEATURE_PERFCTR_CORE,
		       "=a" (offset),
		       "a" (index));

	return offset;
}

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}

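/*
 * Worked example (the numbers only illustrate the two layouts): without
 * X86_FEATURE_PERFCTR_CORE the counters are laid out contiguously, so
 * x86_pmu_event_addr(1) returns x86_pmu.perfctr + 1.  With PERFCTR_CORE the
 * alternative patches in the "shll $1, %eax", control and counter MSRs
 * interleave, and the same call returns x86_pmu.perfctr + 2.
 */
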
int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_ds_init(void);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(void);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

int p4_pmu_init(void);

int p6_pmu_init(void);

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

#endif /* CONFIG_CPU_SUP_INTEL */