perf/x86: Vectorize cpuc->kfree_on_online
[linux-2.6-block.git] arch/x86/kernel/cpu/perf_event.h
/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val)						\
do {									\
	unsigned int _msr = (msr);					\
	u64 _val = (val);						\
	trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr),	\
			(unsigned long long)(_val));			\
	native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32));	\
} while (0)
#endif

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x1 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x2 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x4 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED	0x8 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x10 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x20 /* haswell style datala, unknown */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x40 /* grant rdpmc permission */

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		8

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		16

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	unsigned int		group_flag;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};
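
/*
 * Usage sketch (illustrative, not part of this header): a cpu_prepare
 * hook allocates per-core state, and the cpu_starting hook, on finding
 * that an HT sibling already owns that state, parks its own copy in a
 * kfree_on_online[] slot so the hotplug-online path can kfree() it
 * safely.  Roughly:
 *
 *	cpuc->kfree_on_online[X86_PERF_KFREE_SHARED] = cpuc->shared_regs;
 *	cpuc->shared_regs = sibling_cpuc->shared_regs;
 *	cpuc->shared_regs->refcnt++;
 *
 * (sibling_cpuc is hypothetical shorthand for the sibling thread's
 * cpu_hw_events; see the Intel cpu_starting implementation for the
 * real lookup.)
 */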

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept to a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
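
/*
 * Illustrative sketch: fixed-counter constraints pin an architectural
 * event to one fixed counter by setting the single bit at position
 * 32+n of the index mask.  The Intel constraint tables carry entries
 * along these lines (check the tables in perf_event_intel.c before
 * relying on the exact encodings):
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	// INST_RETIRED.ANY
 *	FIXED_EVENT_CONSTRAINT(0x003c, 1),	// CPU_CLK_UNHALTED.CORE
 *	FIXED_EVENT_CONSTRAINT(0x0300, 2),	// CPU_CLK_UNHALTED.REF
 */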

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
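
/*
 * Illustrative sketch: constraint tables are plain arrays terminated by
 * EVENT_CONSTRAINT_END, so a PMU walks them with for_each_event_constraint().
 * The constraint values below are made up for the example; the lookup
 * pattern mirrors x86_get_event_constraints():
 *
 *	static struct event_constraint example_constraints[] = {
 *		INTEL_EVENT_CONSTRAINT(0x3c, 0x3),	// event 0x3c on counters 0-1
 *		FIXED_EVENT_CONSTRAINT(0x00c0, 0),	// INST_RETIRED.ANY on fixed ctr 0
 *		EVENT_CONSTRAINT_END
 *	};
 *
 *	struct event_constraint *c;
 *	for_each_event_constraint(c, example_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 */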

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between the PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in struct cpu_hw_events.
 */
struct extra_reg {
	unsigned int	event;
	unsigned int	msr;
	u64		config_mask;
	u64		valid_mask;
	int		idx;  /* per_xxx->regs[] reg index */
	bool		extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c)	\
	INTEL_UEVENT_EXTRA_REG(c,		\
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff,		\
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
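
/*
 * Illustrative sketch: a model's extra_regs table maps an event/umask to
 * the MSR that carries the rest of its configuration, and is terminated
 * by EVENT_EXTRA_END.  The Nehalem offcore-response entry looks roughly
 * like this (check perf_event_intel.c for the authoritative table):
 *
 *	static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *		EVENT_EXTRA_END
 *	};
 */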

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
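
/*
 * Illustrative sketch: X86_CONFIG() builds a raw config value by naming
 * the PERFEVTSEL bit-fields.  The PEBS alias code uses it along these
 * lines (the values shown are the SNB-style INST_RETIRED.PREC_DIST
 * encoding; verify against the pebs_aliases implementations before
 * reuse, and note the real code also preserves the non-raw bits):
 *
 *	event->hw.config = X86_CONFIG(.event = 0xc0, .umask = 0x01,
 *				      .inv = 1, .cmask = 16);
 */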

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	bool		late_ack;
	unsigned	(*limit_period)(struct perf_event *event, unsigned l);

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int	flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	int lbr_callstack_users;
	int lbr_stack_state;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
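
/*
 * Illustrative sketch: a model-specific init path registers a quirk with
 * x86_add_quirk(), and the registered functions are run once after the
 * PMU has been probed.  Names below are examples of such handlers, not
 * guaranteed by this header:
 *
 *	x86_add_quirk(intel_sandybridge_quirk);
 */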

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};
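
/*
 * Illustrative sketch: a model file declares a sysfs event string and
 * exports it through its cpu_events attribute array.  The Nehalem
 * mem-loads alias is a real-world example of the pattern, but verify
 * the encoding against the model code before reuse:
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 *	static struct attribute *nhm_events_attrs[] = {
 *		EVENT_PTR(mem_ld_nhm),
 *		NULL,
 *	};
 */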

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return x86_pmu.lbr_sel_map &&
	       x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
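
/*
 * Illustrative note: when ->addr_offset is NULL the counter MSRs are
 * assumed to be contiguous, so counter i lives at eventsel/perfctr + i.
 * AMD Fam15h-style cores interleave control and count registers, so
 * their callback returns roughly (index << 1); the sketch below shows
 * the shape of such a hook (assumed; see the AMD addr_offset
 * implementation for the real version and its legacy fallback):
 *
 *	static int example_addr_offset(int index, bool eventsel)
 *	{
 *		return index << 1;	// CTL/CTR pairs are interleaved
 *	}
 */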

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct perf_event **events, int n,
			int wmin, int wmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
	    x86_pmu.intel_cap.pebs_format < 2)
		return true;

	return false;
}

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !event->attr.freq && event->hw.sample_period == 1)
		return true;

	return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(void);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

#endif /* CONFIG_CPU_SUP_INTEL */