/*
 * Hardware performance events for the Alpha.
 *
 * We implement HW counts on the EV67 and subsequent CPUs only.
 *
 * (C) 2010 Michael J. Cree
 *
 * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
 * ARM code, which are copyright by their respective authors.
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/init.h>

#include <asm/hwrpb.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>

/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1

/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
	int			enabled;
	/* Number of events scheduled; also the number of entries valid in
	 * the arrays below.
	 */
	int			n_events;
	/* Number of events added since the last hw_perf_disable(). */
	int			n_added;
	/* Events currently scheduled. */
	struct perf_event	*event[MAX_HWEVENTS];
	/* Event type of each scheduled event. */
	unsigned long		evtype[MAX_HWEVENTS];
	/* Current index of each scheduled event; if not yet determined
	 * contains PMC_NO_INDEX.
	 */
	int			current_idx[MAX_HWEVENTS];
	/* The active PMCs' config for easy use with wrperfmon(). */
	unsigned long		config;
	/* The active counters' indices for easy use with wrperfmon(). */
	unsigned long		idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);


/*
 * A structure to hold the description of the PMCs available on a particular
 * type of Alpha CPU.
 */
struct alpha_pmu_t {
	/* Mapping of the perf system hw event types to indigenous event types */
	const int *event_map;
	/* The number of entries in the event_map */
	int max_events;
	/* The number of PMCs on this Alpha */
	int num_pmcs;
	/*
	 * All PMC counters reside in the IBOX register PCTR.  This is the
	 * LSB of the counter.
	 */
	int pmc_count_shift[MAX_HWEVENTS];
	/*
	 * The mask that isolates the PMC bits when the LSB of the counter
	 * is shifted to bit 0.
	 */
	unsigned long pmc_count_mask[MAX_HWEVENTS];
	/* The maximum period the PMC can count. */
	unsigned long pmc_max_period[MAX_HWEVENTS];
	/*
	 * The maximum value that may be written to the counter due to
	 * hardware restrictions is pmc_max_period - pmc_left.
	 */
	long pmc_left[3];
	/* Subroutine for allocation of PMCs.  Enforces constraints. */
	int (*check_constraints)(struct perf_event **, unsigned long *, int);
};

/*
 * The Alpha CPU PMU description currently in operation.  This is set during
 * the boot process to the specific CPU of the machine.
 */
static const struct alpha_pmu_t *alpha_pmu;


#define HW_OP_UNSUPPORTED -1

/*
 * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
 * follows.  Since they are identical we refer to them collectively as the
 * EV67 henceforth.
 */

/*
 * EV67 PMC event types
 *
 * There is no one-to-one mapping of the possible hw event types to the
 * actual codes that are used to program the PMCs, hence we introduce our
 * own hw event type identifiers.
 */
enum ev67_pmc_event_type {
	EV67_CYCLES = 1,
	EV67_INSTRUCTIONS,
	EV67_BCACHEMISS,
	EV67_MBOXREPLAY,
	EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)


/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	 = EV67_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	 = EV67_INSTRUCTIONS,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	 = EV67_BCACHEMISS,
};

struct ev67_mapping_t {
	int config;
	int idx;
};

/*
 * The mapping used for one event only - these must be in the same order as
 * the enum ev67_pmc_event_type definition.
 */
static const struct ev67_mapping_t ev67_mapping[] = {
	{EV67_PCTR_INSTR_CYCLES, 1},	 /* EV67_CYCLES */
	{EV67_PCTR_INSTR_CYCLES, 0},	 /* EV67_INSTRUCTIONS */
	{EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
	{EV67_PCTR_CYCLES_MBOX, 1}	 /* EV67_MBOXREPLAY */
};
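/*
 * Note: each entry pairs a PCTR configuration with the PMC that counts the
 * named event under that configuration.  E.g. under EV67_PCTR_INSTR_CYCLES,
 * PMC 0 counts instructions and PMC 1 counts cycles, hence EV67_CYCLES
 * selects idx 1 while EV67_INSTRUCTIONS selects idx 0.
 */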


/*
 * Check that a group of events can be simultaneously scheduled on to the
 * EV67 PMU.  Also allocate counter indices and config.
 */
static int ev67_check_constraints(struct perf_event **event,
				unsigned long *evtype, int n_ev)
{
	int idx0;
	unsigned long config;

	idx0 = ev67_mapping[evtype[0]-1].idx;
	config = ev67_mapping[evtype[0]-1].config;
	if (n_ev == 1)
		goto success;

	BUG_ON(n_ev != 2);

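	/*
	 * For the two-event cases below, idx0 is the PMC that event[0] will
	 * be assigned on success; event[1] then takes the other PMC
	 * (idx0 ^ 1).  Each test picks idx0 so that the constrained event
	 * lands on its required PMC and then checks that the remaining
	 * event is one the chosen configuration can count alongside it.
	 */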
	if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
		/* MBOX replay traps must be on PMC 1 */
		idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
		/* Only cycles can accompany MBOX replay traps */
		if (evtype[idx0] == EV67_CYCLES) {
			config = EV67_PCTR_CYCLES_MBOX;
			goto success;
		}
	}

	if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
		/* Bcache misses must be on PMC 1 */
		idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
		/* Only instructions can accompany Bcache misses */
		if (evtype[idx0] == EV67_INSTRUCTIONS) {
			config = EV67_PCTR_INSTR_BCACHEMISS;
			goto success;
		}
	}

	if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
		/* Instructions must be on PMC 0 */
		idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
		/* By this point only cycles can accompany instructions */
		if (evtype[idx0^1] == EV67_CYCLES) {
			config = EV67_PCTR_INSTR_CYCLES;
			goto success;
		}
	}

	/* Otherwise, darn it, there is a conflict. */
	return -1;

success:
	event[0]->hw.idx = idx0;
	event[0]->hw.config_base = config;
	if (n_ev == 2) {
		event[1]->hw.idx = idx0 ^ 1;
		event[1]->hw.config_base = config;
	}
	return 0;
}


static const struct alpha_pmu_t ev67_pmu = {
	.event_map = ev67_perfmon_event_map,
	.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
	.num_pmcs = 2,
	.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
	.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
	.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
	.pmc_left = {16, 4, 0},
	.check_constraints = ev67_check_constraints
};


/*
 * Helper routines to ensure that we read/write only the correct PMC bits
 * when calling the wrperfmon PALcall.
 */
static inline void alpha_write_pmc(int idx, unsigned long val)
{
	val &= alpha_pmu->pmc_count_mask[idx];
	val <<= alpha_pmu->pmc_count_shift[idx];
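	/* Bit idx in the value passed to the write command selects which
	 * PMC is updated (see the wrperfmon interface in asm/wrperfmon.h).
	 */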
	val |= (1<<idx);
	wrperfmon(PERFMON_CMD_WRITE, val);
}

static inline unsigned long alpha_read_pmc(int idx)
{
	unsigned long val;

	val = wrperfmon(PERFMON_CMD_READ, 0);
	val >>= alpha_pmu->pmc_count_shift[idx];
	val &= alpha_pmu->pmc_count_mask[idx];
	return val;
}

/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
				struct hw_perf_event *hwc, int idx)
{
	long left = local64_read(&hwc->period_left);
	long period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Hardware restrictions require that the counters must not be
	 * written with values that are too close to the maximum period.
	 */
	if (unlikely(left < alpha_pmu->pmc_left[idx]))
		left = alpha_pmu->pmc_left[idx];

	if (left > (long)alpha_pmu->pmc_max_period[idx])
		left = alpha_pmu->pmc_max_period[idx];

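	/*
	 * Program the PMC with -left (truncated to the counter width by
	 * alpha_write_pmc()) so that it overflows, raising a PMI, after
	 * a further 'left' events.
	 */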
	local64_set(&hwc->prev_count, (unsigned long)(-left));

	alpha_write_pmc(idx, (unsigned long)(-left));

	perf_event_update_userpage(event);

	return ret;
}


/*
 * Calculates the count (the 'delta') since the last time the PMC was read.
 *
 * As the PMCs' full period can easily be exceeded within the perf system
 * sampling period we cannot use any high order bits as a guard bit in the
 * PMCs to detect overflow as is done by other architectures.  The code here
 * calculates the delta on the basis that there is no overflow when ovf is
 * zero.  The value passed via ovf by the interrupt handler corrects for
 * overflow.
 *
 * This can be racy on rare occasions -- a call to this routine can occur
 * with an overflowed counter just before the PMI service routine is called.
 * The check for delta negative hopefully always rectifies this situation.
 */
static unsigned long alpha_perf_event_update(struct perf_event *event,
					struct hw_perf_event *hwc, int idx, long ovf)
{
	long prev_raw_count, new_raw_count;
	long delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = alpha_read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

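	/*
	 * prev_count holds the full-width value (-left) written by
	 * alpha_perf_event_set_period(), whereas the PMC read is truncated
	 * to the counter width, so mask prev_raw_count before differencing.
	 */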
	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;

	/* It is possible on very rare occasions that the PMC has overflowed
	 * but the interrupt is yet to come.  Detect and fix this situation.
	 */
	if (unlikely(delta < 0)) {
		delta += alpha_pmu->pmc_max_period[idx] + 1;
	}

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}


/*
 * Collect all HW events into the array event[].
 */
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *event[], unsigned long *evtype,
			  int *current_idx)
{
	struct perf_event *pe;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		event[n] = group;
		evtype[n] = group->hw.event_base;
		current_idx[n++] = PMC_NO_INDEX;
	}
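	/* Gather the remaining hardware siblings; those in state OFF are
	 * skipped so that disabled events do not consume a PMC.
	 */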
	list_for_each_entry(pe, &group->sibling_list, group_entry) {
		if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			event[n] = pe;
			evtype[n] = pe->hw.event_base;
			current_idx[n++] = PMC_NO_INDEX;
		}
	}
	return n;
}


/*
 * Check that a group of events can be simultaneously scheduled on to the PMU.
 */
static int alpha_check_constraints(struct perf_event **events,
				   unsigned long *evtypes, int n_ev)
{
	/* No HW events is possible from hw_perf_group_sched_in(). */
	if (n_ev == 0)
		return 0;

	if (n_ev > alpha_pmu->num_pmcs)
		return -1;

	return alpha_pmu->check_constraints(events, evtypes, n_ev);
}


/*
 * If new events have been scheduled then update cpuc with the new
 * configuration.  This may involve shifting cycle counts from one PMC to
 * another.
 */
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
	int j;

	if (cpuc->n_added == 0)
		return;

	/* Find counters that are moving to another PMC and update */
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];

		if (cpuc->current_idx[j] != PMC_NO_INDEX &&
			cpuc->current_idx[j] != pe->hw.idx) {
			alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
			cpuc->current_idx[j] = PMC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
	cpuc->idx_mask = 0;
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];
		struct hw_perf_event *hwc = &pe->hw;
		int idx = hwc->idx;

		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
			alpha_perf_event_set_period(pe, hwc, idx);
			cpuc->current_idx[j] = idx;
		}

		if (!(hwc->state & PERF_HES_STOPPED))
			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
	}
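	/* All scheduled events share a single PCTR configuration (the
	 * constraint checker writes the same config_base to every event),
	 * so taking it from event[0] covers them all.
	 */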
	cpuc->config = cpuc->event[0]->hw.config_base;
}


/* Schedule perf HW event on to PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static int alpha_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int n0;
	int ret;
	unsigned long irq_flags;

	/*
	 * The Sparc code has the IRQ disable first followed by the perf
	 * disable, however this can lead to an overflowed counter with the
	 * PMI disabled on rare occasions.  The alpha_perf_event_update()
	 * routine should detect this situation by noting a negative delta,
	 * nevertheless we disable the PMCs first to enable a potential
	 * final PMI to occur before we disable interrupts.
	 */
	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	/* Default to error to be returned */
	ret = -EAGAIN;

	/* Insert event on to PMU and if successful modify ret to valid return */
	n0 = cpuc->n_events;
	if (n0 < alpha_pmu->num_pmcs) {
		cpuc->event[n0] = event;
		cpuc->evtype[n0] = event->hw.event_base;
		cpuc->current_idx[n0] = PMC_NO_INDEX;

		if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
			cpuc->n_events++;
			cpuc->n_added++;
			ret = 0;
		}
	}
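	/* (On failure the tentative entry written at n0 is harmless since
	 * n_events was not incremented.)
	 */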

	hwc->state = PERF_HES_UPTODATE;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_STOPPED;

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);

	return ret;
}


/* Remove a perf HW event from the PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static void alpha_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long irq_flags;
	int j;

	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	for (j = 0; j < cpuc->n_events; j++) {
		if (event == cpuc->event[j]) {
			int idx = cpuc->current_idx[j];

			/* Shift remaining entries down into the existing
			 * slot.
			 */
			while (++j < cpuc->n_events) {
				cpuc->event[j - 1] = cpuc->event[j];
				cpuc->evtype[j - 1] = cpuc->evtype[j];
				cpuc->current_idx[j - 1] =
					cpuc->current_idx[j];
			}

			/* Absorb the final count and turn off the event. */
			alpha_perf_event_update(event, hwc, idx, 0);
			perf_event_update_userpage(event);

			cpuc->idx_mask &= ~(1UL<<idx);
			cpuc->n_events--;
			break;
		}
	}

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);
}


static void alpha_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	alpha_perf_event_update(event, hwc, hwc->idx, 0);
}


static void alpha_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		cpuc->idx_mask &= ~(1UL<<hwc->idx);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		alpha_perf_event_update(event, hwc, hwc->idx, 0);
		hwc->state |= PERF_HES_UPTODATE;
	}

	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}


static void alpha_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
		alpha_perf_event_set_period(event, hwc, hwc->idx);
	}

	hwc->state = 0;

	cpuc->idx_mask |= 1UL<<hwc->idx;
	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}


/*
 * Check that CPU performance counters are supported.
 * - currently support EV67 and later CPUs.
 * - actually some later revisions of the EV6 have the same PMC model as the
 *   EV67 but we don't do sufficiently deep CPU detection to detect them.
 *   Bad luck to the very few people who might have one, I guess.
 */
static int supported_cpu(void)
{
	struct percpu_struct *cpu;
	unsigned long cputype;

	/* Get cpu type from HW */
	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
	cputype = cpu->type & 0xffffffff;
	/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
	return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}


static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Nothing to be done! */
	return;
}


static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *evts[MAX_HWEVENTS];
	unsigned long evtypes[MAX_HWEVENTS];
	int idx_rubbish_bin[MAX_HWEVENTS];
	int ev;
	int n;

	/* We only support a limited range of HARDWARE event types; anything
	 * else must be programmed via a RAW event type.
	 */
	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= alpha_pmu->max_events)
			return -EINVAL;
		ev = alpha_pmu->event_map[attr->config];
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		return -EOPNOTSUPP;
	} else if (attr->type == PERF_TYPE_RAW) {
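		/* Raw event codes are taken as our internal event type
		 * identifiers; only the low byte is significant.
		 */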
		ev = attr->config & 0xff;
	} else {
		return -EOPNOTSUPP;
	}

	if (ev < 0) {
		return ev;
	}

	/* The EV67 does not support mode exclusion */
	if (attr->exclude_kernel || attr->exclude_user
			|| attr->exclude_hv || attr->exclude_idle) {
		return -EPERM;
	}

	/*
	 * We place the event type in event_base here and leave calculation
	 * of the codes to programme the PMU for alpha_pmu_enable() because
	 * it is only then that we will know what HW events are actually
	 * scheduled on to the PMU.  At that point the code to programme the
	 * PMU is put into config_base and the PMC to use is placed into
	 * idx.  We initialise idx (below) to PMC_NO_INDEX to indicate that
	 * it is yet to be determined.
	 */
	hwc->event_base = ev;

	/* Collect events in a group together suitable for calling
	 * alpha_check_constraints() to verify that the group as a whole can
	 * be scheduled on to the PMU.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				alpha_pmu->num_pmcs - 1,
				evts, evtypes, idx_rubbish_bin);
		if (n < 0)
			return -EINVAL;
	}
	evtypes[n] = hwc->event_base;
	evts[n] = event;

	if (alpha_check_constraints(evts, evtypes, n + 1))
		return -EINVAL;

	/* Indicate that PMU config and idx are yet to be determined. */
	hwc->config_base = 0;
	hwc->idx = PMC_NO_INDEX;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Most architectures reserve the PMU for their use at this point.
	 * As there is no existing mechanism to arbitrate usage and there
	 * appears to be no other user of the Alpha PMU we just assume
	 * that we can just use it, hence a NO-OP here.
	 *
	 * Maybe an alpha_reserve_pmu() routine should be implemented but is
	 * anything else ever going to use it?
	 */

	if (!hwc->sample_period) {
		hwc->sample_period = alpha_pmu->pmc_max_period[0];
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Main entry point to initialise a HW performance event.
 */
static int alpha_pmu_event_init(struct perf_event *event)
{
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!alpha_pmu)
		return -ENODEV;

	/* Do the real initialisation work. */
	err = __hw_perf_event_init(event);

	return err;
}

/*
 * Main entry point - enable HW performance counters.
 */
static void alpha_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();
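	/* The barrier() above keeps the compiler from reordering the
	 * enabled flag update past the counter programming below.
	 */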

	if (cpuc->n_events > 0) {
		/* Update cpuc with information from any new scheduled events. */
		maybe_change_configuration(cpuc);

		/* Start counting the desired events. */
		wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
		wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
	}
}


/*
 * Main entry point - disable HW performance counters.
 */
static void alpha_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}

static struct pmu pmu = {
	.pmu_enable	= alpha_pmu_enable,
	.pmu_disable	= alpha_pmu_disable,
	.event_init	= alpha_pmu_event_init,
	.add		= alpha_pmu_add,
	.del		= alpha_pmu_del,
	.start		= alpha_pmu_start,
	.stop		= alpha_pmu_stop,
	.read		= alpha_pmu_read,
};


/*
 * Main entry point - don't know when this is called but it
 * obviously dumps debug info.
 */
void perf_event_print_debug(void)
{
	unsigned long flags;
	unsigned long pcr;
	int pcr0, pcr1;
	int cpu;

	if (!supported_cpu())
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = wrperfmon(PERFMON_CMD_READ, 0);
	pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
	pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];

	pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);

	local_irq_restore(flags);
}


/*
 * Performance Monitoring Interrupt Service Routine called when a PMC
 * overflows.  The PMC that overflowed is passed in la_ptr.
 */
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
					struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc;
	struct perf_sample_data data;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, j;

	__get_cpu_var(irq_pmi_count)++;
	cpuc = &__get_cpu_var(cpu_hw_events);

	/* Completely counting through the PMC's period to trigger a new PMC
	 * overflow interrupt while in this interrupt routine is utterly
	 * disastrous!  The EV6 and EV67 counters are sufficiently large to
	 * prevent this but to be really sure disable the PMCs.
	 */
	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);

	/* la_ptr is the counter that overflowed. */
	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warning("PMI: silly index %ld\n", la_ptr);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	idx = la_ptr;

	perf_sample_data_init(&data, 0);
	for (j = 0; j < cpuc->n_events; j++) {
		if (cpuc->current_idx[j] == idx)
			break;
	}

	if (unlikely(j == cpuc->n_events)) {
		/* This can occur if the event is disabled right on a PMC overflow. */
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	event = cpuc->event[j];

	if (unlikely(!event)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warning("PMI: No event at index %d!\n", idx);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	hwc = &event->hw;
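	/* The counter has wrapped, so pass the full counter range
	 * (pmc_max_period + 1) as the overflow correction to the update.
	 */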
	alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
	data.period = event->hw.last_period;

	if (alpha_perf_event_set_period(event, hwc, idx)) {
		if (perf_event_overflow(event, &data, regs)) {
			/* Interrupts coming too quickly; "throttle" the
			 * counter, i.e., disable it for a little while.
			 */
			alpha_pmu_stop(event, 0);
		}
	}
	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);

	return;
}


/*
 * Init call to initialise performance events at kernel startup.
 */
int __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_cpu()) {
		pr_cont("No support for your CPU.\n");
		return 0;
	}

	pr_cont("Supported CPU type!\n");

	/* Override performance counter IRQ vector */
	perf_irq = alpha_perf_event_irq_handler;

	/* And set up PMU specification */
	alpha_pmu = &ev67_pmu;

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);