/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/*
 * Debug aid: flip the #if 0 below to #if 1 to trace every wrmsrl()
 * call via trace_printk() before the actual MSR write is performed.
 */
#if 0
#undef wrmsrl
#define wrmsrl(msr, val)						\
do {									\
	unsigned int _msr = (msr);					\
	u64 _val = (val);						\
	trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr),	\
			(unsigned long long)(_val));			\
	native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32));	\
} while (0)
#endif

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slots in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};
/*
 * struct event_constraint flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x1 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x2 /* st data address sampling */

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		8

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};
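
/*
 * Illustrative sketch (values are assumptions; see the DS allocation
 * code in perf_event_intel_ds.c for the real thing): a PEBS setup that
 * raises a PMI after every record would point the interrupt threshold
 * one record past the base:
 *
 *	ds->pebs_buffer_base	     = (u64)(unsigned long)buffer;
 *	ds->pebs_index		     = ds->pebs_buffer_base;
 *	ds->pebs_interrupt_threshold =
 *		ds->pebs_buffer_base + x86_pmu.pebs_record_size;
 */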

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t		lock;	/* per-core: protect structure */
	u64			config;	/* extra MSR config */
	u64			reg;	/* extra MSR number */
	atomic_t		ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events;
	int			n_added;
	int			n_txn;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	unsigned int		group_flag;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online;
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
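
/*
 * Worked example (event code taken from the Intel fixed-counter map,
 * used here purely as illustration): FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 * pins INST_RETIRED.ANY to fixed counter 0; its idxmsk64 bit is
 * 1ULL << (32 + 0), i.e. bit 32, the first index past the general
 * purpose counters.
 */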

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight; (e)++)
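
/*
 * Example iteration (a sketch modeled on x86_get_event_constraints();
 * the surrounding names are assumptions): walk a constraint table,
 * relying on the zero-weight EVENT_CONSTRAINT_END sentinel to stop:
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 */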

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
	};
	u64	capabilities;
};
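
/*
 * On Arch Perfmon v2+ this union is typically filled straight from
 * MSR_IA32_PERF_CAPABILITIES, e.g. (sketch):
 *
 *	rdmsrl(MSR_IA32_PERF_CAPABILITIES, x86_pmu.intel_cap.capabilities);
 */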

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
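
/*
 * Example (illustrative; the event/umask values are just a sample raw
 * encoding): build a config word with designated initializers instead
 * of a hand-assembled literal:
 *
 *	u64 cfg = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=0, .cmask=0);
 */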

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*flush_branch_stack)(void);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int er_flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};
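
/*
 * Illustrative sketch (names and values are assumptions, loosely
 * modeled on the P6 driver): a minimal PMU description wires up the
 * MSR bases, counter geometry and the generic helpers:
 *
 *	static __initconst const struct x86_pmu example_pmu = {
 *		.name		= "example",
 *		.handle_irq	= x86_pmu_handle_irq,
 *		.disable_all	= x86_pmu_disable_all,
 *		.enable_all	= x86_pmu_enable_all,
 *		.eventsel	= MSR_P6_EVNTSEL0,
 *		.perfctr	= MSR_P6_PERFCTR0,
 *		.num_counters	= 2,
 *		.cntval_bits	= 32,
 *		.cntval_mask	= (1ULL << 32) - 1,
 *		.max_period	= (1ULL << 31) - 1,
 *	};
 */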

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
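
/*
 * Typical use, from a model-specific init path (sketch; the quirk
 * function name is an assumption):
 *
 *	x86_add_quirk(intel_sandybridge_quirk);
 *
 * Quirks are chained onto x86_pmu.quirks and run once during init.
 */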

#define ERF_NO_HT_SHARING	1
#define ERF_HAS_RSP_1		2

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};
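
/*
 * Example (sketch; the attribute name and event string are assumptions
 * modeled on the Intel driver): export a model-specific event encoding
 * to sysfs:
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 */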

extern struct x86_pmu x86_pmu __read_mostly;

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
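
/*
 * With a NULL ->addr_offset, counter <index> thus lives at the default
 * eventsel+index / perfctr+index MSRs; PMUs whose control MSRs are not
 * contiguous (e.g. AMD family 15h, where CTL/CTR registers interleave)
 * supply an addr_offset() callback instead.
 */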

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}
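
/*
 * perf_ctr_virt_mask above is the inverted mask described in
 * cpu_hw_events: it lets the AMD host/guest filtering code strip
 * Host-Only/Guest-Only enable bits from the control MSR write when
 * they must not be set.
 */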

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}
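
/*
 * The 64-bit test works because the kernel occupies the upper half of
 * the canonical address space: any kernel address has the top bit set,
 * so it is negative when viewed as a signed long.
 */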

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_ds_init(void);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(void);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

#endif /* CONFIG_CPU_SUP_INTEL */