Commit | Line | Data |
---|---|---|
18f2190d MJ |
1 | /* |
2 | * Cell Broadband Engine OProfile Support | |
3 | * | |
4 | * (C) Copyright IBM Corporation 2006 | |
5 | * | |
6 | * Author: David Erb (djerb@us.ibm.com) | |
7 | * Modifications: | |
8 | * Carl Love <carll@us.ibm.com> | |
9 | * Maynard Johnson <maynardj@us.ibm.com> | |
10 | * | |
11 | * This program is free software; you can redistribute it and/or | |
12 | * modify it under the terms of the GNU General Public License | |
13 | * as published by the Free Software Foundation; either version | |
14 | * 2 of the License, or (at your option) any later version. | |
15 | */ | |
16 | ||
17 | #include <linux/cpufreq.h> | |
18 | #include <linux/delay.h> | |
19 | #include <linux/init.h> | |
20 | #include <linux/jiffies.h> | |
21 | #include <linux/kthread.h> | |
22 | #include <linux/oprofile.h> | |
23 | #include <linux/percpu.h> | |
24 | #include <linux/smp.h> | |
25 | #include <linux/spinlock.h> | |
26 | #include <linux/timer.h> | |
27 | #include <asm/cell-pmu.h> | |
28 | #include <asm/cputable.h> | |
29 | #include <asm/firmware.h> | |
30 | #include <asm/io.h> | |
31 | #include <asm/oprofile_impl.h> | |
32 | #include <asm/processor.h> | |
33 | #include <asm/prom.h> | |
34 | #include <asm/ptrace.h> | |
35 | #include <asm/reg.h> | |
36 | #include <asm/rtas.h> | |
37 | #include <asm/system.h> | |
38 | ||
39 | #include "../platforms/cell/interrupt.h" | |
91a69c96 | 40 | #include "../platforms/cell/cbe_regs.h" |
18f2190d MJ |
41 | |
42 | #define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */ | |
c7eb7347 MJ |
43 | #define PPU_CYCLES_GRP_NUM 1 /* special group number for identifying |
44 | * PPU_CYCLES event | |
45 | */ | |
18f2190d MJ |
46 | #define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */ |
47 | ||
bcb63e25 CL |
48 | #define NUM_THREADS 2 /* number of physical threads in |
49 | * a physical processor |
50 | */ | |
51 | #define NUM_TRACE_BUS_WORDS 4 | |
52 | #define NUM_INPUT_BUS_WORDS 2 | |
53 | ||
18f2190d MJ |
54 | |
55 | struct pmc_cntrl_data { | |
56 | unsigned long vcntr; | |
57 | unsigned long evnts; | |
58 | unsigned long masks; | |
59 | unsigned long enabled; | |
60 | }; | |
61 | ||
62 | /* | |
63 | * ibm,cbe-perftools rtas parameters | |
64 | */ | |
65 | ||
66 | struct pm_signal { | |
67 | u16 cpu; /* Processor to modify */ | |
68 | u16 sub_unit; /* hw subunit this applies to (if applicable) */ | |
c7eb7347 | 69 | short int signal_group; /* Signal Group to Enable/Disable */ |
18f2190d MJ |
70 | u8 bus_word; /* Enable/Disable on this Trace/Trigger/Event |
71 | * Bus Word(s) (bitmask) | |
72 | */ | |
73 | u8 bit; /* Trigger/Event bit (if applicable) */ | |
74 | }; | |
75 | ||
76 | /* | |
77 | * rtas call arguments | |
78 | */ | |
79 | enum { | |
80 | SUBFUNC_RESET = 1, | |
81 | SUBFUNC_ACTIVATE = 2, | |
82 | SUBFUNC_DEACTIVATE = 3, | |
83 | ||
84 | PASSTHRU_IGNORE = 0, | |
85 | PASSTHRU_ENABLE = 1, | |
86 | PASSTHRU_DISABLE = 2, | |
87 | }; | |
88 | ||
89 | struct pm_cntrl { | |
90 | u16 enable; | |
91 | u16 stop_at_max; | |
92 | u16 trace_mode; | |
93 | u16 freeze; | |
94 | u16 count_mode; | |
95 | }; | |
96 | ||
97 | static struct { | |
98 | u32 group_control; | |
99 | u32 debug_bus_control; | |
100 | struct pm_cntrl pm_cntrl; | |
101 | u32 pm07_cntrl[NR_PHYS_CTRS]; | |
102 | } pm_regs; | |
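/* pm_regs stages the values that are later written to the hardware:
 * group_control and debug_bus_control in cell_cpu_setup(), pm_control
 * via write_pm_cntrl(), and the per-counter pm07 control words via
 * enable_ctr().
 */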
103 | ||
18f2190d MJ |
104 | #define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12) |
105 | #define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4) | |
106 | #define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8) | |
107 | #define GET_POLARITY(x) ((x & 0x00000002) >> 1) | |
108 | #define GET_COUNT_CYCLES(x) (x & 0x00000001) | |
109 | #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2) | |
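/* A sketch of the unit_mask layout implied by the extraction macros above
 * (inferred from the masks and shifts, not taken from a hardware manual):
 *   bit   0     - count cycles
 *   bit   1     - polarity
 *   bit   2     - input control
 *   bits  4:7   - trace/debug bus word select (bitmask)
 *   bits  8:9   - bus type
 *   bits 12:15  - hardware sub-unit
 */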
110 | ||
18f2190d MJ |
111 | static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values); |
112 | ||
113 | static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS]; | |
114 | ||
115 | /* Interpretation of hdw_thread: |
116 | * 0 - even virtual cpus 0, 2, 4,... | |
117 | * 1 - odd virtual cpus 1, 3, 5, ... | |
118 | */ | |
119 | static u32 hdw_thread; | |
120 | ||
121 | static u32 virt_cntr_inter_mask; | |
122 | static struct timer_list timer_virt_cntr; | |
123 | ||
124 | /* pm_signal needs to be global since it is initialized in | |
125 | * cell_reg_setup at the time when the necessary information | |
126 | * is available. | |
127 | */ | |
128 | static struct pm_signal pm_signal[NR_PHYS_CTRS]; | |
129 | static int pm_rtas_token; | |
130 | ||
131 | static u32 reset_value[NR_PHYS_CTRS]; | |
132 | static int num_counters; | |
133 | static int oprofile_running; | |
057b184a | 134 | static DEFINE_SPINLOCK(virt_cntr_lock); |
18f2190d MJ |
135 | |
136 | static u32 ctr_enabled; | |
137 | ||
bcb63e25 CL |
138 | static unsigned char trace_bus[NUM_TRACE_BUS_WORDS]; |
139 | static unsigned char input_bus[NUM_INPUT_BUS_WORDS]; | |
18f2190d MJ |
140 | |
141 | /* | |
142 | * Firmware interface functions | |
143 | */ | |
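/* In the helper below, the buffer's physical address is split into its
 * high and low 32-bit halves before being handed to firmware (RTAS
 * arguments are 32-bit cells); the call passes five inputs and expects
 * one return value.
 */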
144 | static int | |
145 | rtas_ibm_cbe_perftools(int subfunc, int passthru, | |
146 | void *address, unsigned long length) | |
147 | { | |
148 | u64 paddr = __pa(address); | |
149 | ||
150 | return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc, passthru, | |
151 | paddr >> 32, paddr & 0xffffffff, length); | |
152 | } | |
153 | ||
154 | static void pm_rtas_reset_signals(u32 node) | |
155 | { | |
156 | int ret; | |
157 | struct pm_signal pm_signal_local; | |
158 | ||
159 | /* The debug bus is being set to the passthru disable state. | |
160 | * However, the FW still expects at least one legal signal routing |
161 | * entry or it will return an error on the arguments. If we don't | |
162 | * supply a valid entry, we must ignore all return values. Ignoring | |
163 | * all return values means we might miss an error we should be | |
164 | * concerned about. | |
165 | */ | |
166 | ||
167 | /* fw expects physical cpu #. */ | |
168 | pm_signal_local.cpu = node; | |
169 | pm_signal_local.signal_group = 21; | |
170 | pm_signal_local.bus_word = 1; | |
171 | pm_signal_local.sub_unit = 0; | |
172 | pm_signal_local.bit = 0; | |
173 | ||
174 | ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE, | |
175 | &pm_signal_local, | |
176 | sizeof(struct pm_signal)); | |
177 | ||
178 | if (ret) | |
179 | printk(KERN_WARNING "%s: rtas returned: %d\n", | |
180 | __FUNCTION__, ret); | |
181 | } | |
182 | ||
183 | static void pm_rtas_activate_signals(u32 node, u32 count) | |
184 | { | |
185 | int ret; | |
c7eb7347 | 186 | int i, j; |
18f2190d MJ |
187 | struct pm_signal pm_signal_local[NR_PHYS_CTRS]; |
188 | ||
c7eb7347 MJ |
189 | /* There is no debug setup required for the cycles event. |
190 | * Note that only events in the same group can be used. | |
191 | * Otherwise, there will be conflicts in correctly routing | |
192 | * the signals on the debug bus. It is the responsibility |
193 | * of the OProfile user tool to check that the events are in |
194 | * the same group. | |
195 | */ | |
196 | i = 0; | |
18f2190d | 197 | for (j = 0; j < count; j++) { |
c7eb7347 MJ |
198 | if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) { |
199 | ||
200 | /* fw expects physical cpu # */ | |
201 | pm_signal_local[i].cpu = node; | |
202 | pm_signal_local[i].signal_group | |
203 | = pm_signal[j].signal_group; | |
204 | pm_signal_local[i].bus_word = pm_signal[j].bus_word; | |
205 | pm_signal_local[i].sub_unit = pm_signal[j].sub_unit; | |
206 | pm_signal_local[i].bit = pm_signal[j].bit; | |
207 | i++; | |
208 | } | |
18f2190d MJ |
209 | } |
210 | ||
c7eb7347 MJ |
211 | if (i != 0) { |
212 | ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE, | |
213 | pm_signal_local, | |
214 | i * sizeof(struct pm_signal)); | |
18f2190d | 215 | |
c7eb7347 MJ |
216 | if (ret) |
217 | printk(KERN_WARNING "%s: rtas returned: %d\n", | |
218 | __FUNCTION__, ret); | |
219 | } | |
18f2190d MJ |
220 | } |
221 | ||
222 | /* | |
223 | * PM Signal functions | |
224 | */ | |
225 | static void set_pm_event(u32 ctr, int event, u32 unit_mask) | |
226 | { | |
227 | struct pm_signal *p; | |
228 | u32 signal_bit; | |
229 | u32 bus_word, bus_type, count_cycles, polarity, input_control; | |
230 | int j, i; | |
231 | ||
232 | if (event == PPU_CYCLES_EVENT_NUM) { | |
233 | /* Special Event: Count all cpu cycles */ | |
234 | pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES; | |
235 | p = &(pm_signal[ctr]); | |
c7eb7347 | 236 | p->signal_group = PPU_CYCLES_GRP_NUM; |
18f2190d MJ |
237 | p->bus_word = 1; |
238 | p->sub_unit = 0; | |
239 | p->bit = 0; | |
240 | goto out; | |
241 | } else { | |
242 | pm_regs.pm07_cntrl[ctr] = 0; | |
243 | } | |
244 | ||
245 | bus_word = GET_BUS_WORD(unit_mask); | |
246 | bus_type = GET_BUS_TYPE(unit_mask); | |
247 | count_cycles = GET_COUNT_CYCLES(unit_mask); | |
248 | polarity = GET_POLARITY(unit_mask); | |
249 | input_control = GET_INPUT_CONTROL(unit_mask); | |
250 | signal_bit = (event % 100); | |
251 | ||
252 | p = &(pm_signal[ctr]); | |
253 | ||
254 | p->signal_group = event / 100; | |
255 | p->bus_word = bus_word; | |
bcb63e25 | 256 | p->sub_unit = (unit_mask & 0x0000f000) >> 12; |
18f2190d MJ |
257 | |
258 | pm_regs.pm07_cntrl[ctr] = 0; | |
259 | pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles); | |
260 | pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity); | |
261 | pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control); | |
262 | ||
bcb63e25 CL |
263 | /* Some of the islands' signal selection is based on 64-bit words. |
264 | * The debug bus words are 32 bits, and the input words to the |
265 | * performance counters are defined as 32 bits. We need to convert |
266 | * the 64-bit island specification to the appropriate 32-bit input |
267 | * bit and bus word for the performance counter event selection. |
268 | * See the CELL performance monitoring signals manual and the |
269 | * performance counter hardware descriptions for the details. |
270 | */ | |
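/* Worked example of the remapping done below (illustrative, derived from
 * the code rather than the signals manual): an island signal defined on
 * bit 40 of a 64-bit word becomes input bit 8 (40 - 32), and a bus_word
 * selection of 0x3 (words 0 and 1) is narrowed to 0x2, or 0xc (words 2
 * and 3) to 0x8, so only the upper word of the pair is routed.
 */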
18f2190d MJ |
271 | if (input_control == 0) { |
272 | if (signal_bit > 31) { | |
273 | signal_bit -= 32; | |
274 | if (bus_word == 0x3) | |
275 | bus_word = 0x2; | |
276 | else if (bus_word == 0xc) | |
277 | bus_word = 0x8; | |
278 | } | |
279 | ||
280 | if ((bus_type == 0) && p->signal_group >= 60) | |
281 | bus_type = 2; | |
282 | if ((bus_type == 1) && p->signal_group >= 50) | |
283 | bus_type = 0; | |
284 | ||
285 | pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit); | |
286 | } else { | |
287 | pm_regs.pm07_cntrl[ctr] = 0; | |
288 | p->bit = signal_bit; | |
289 | } | |
290 | ||
bcb63e25 | 291 | for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) { |
18f2190d MJ |
292 | if (bus_word & (1 << i)) { |
293 | pm_regs.debug_bus_control |= | |
294 | (bus_type << (31 - (2 * i) + 1)); | |
295 | ||
bcb63e25 | 296 | for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) { |
18f2190d MJ |
297 | if (input_bus[j] == 0xff) { |
298 | input_bus[j] = i; | |
299 | pm_regs.group_control |= | |
300 | (i << (31 - i)); | |
301 | break; | |
302 | } | |
303 | } | |
304 | } | |
305 | } | |
306 | out: | |
307 | ; | |
308 | } | |
309 | ||
bcb63e25 | 310 | static void write_pm_cntrl(int cpu) |
18f2190d | 311 | { |
bcb63e25 CL |
312 | /* OProfile will use 32-bit counters, so set bits 7:10 to 0. |
313 | * pm_regs.pm_cntrl is a global. |
314 | */ | |
315 | ||
18f2190d | 316 | u32 val = 0; |
bcb63e25 | 317 | if (pm_regs.pm_cntrl.enable == 1) |
18f2190d MJ |
318 | val |= CBE_PM_ENABLE_PERF_MON; |
319 | ||
bcb63e25 | 320 | if (pm_regs.pm_cntrl.stop_at_max == 1) |
18f2190d MJ |
321 | val |= CBE_PM_STOP_AT_MAX; |
322 | ||
bcb63e25 CL |
323 | if (pm_regs.pm_cntrl.trace_mode == 1) |
324 | val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode); | |
18f2190d | 325 | |
bcb63e25 | 326 | if (pm_regs.pm_cntrl.freeze == 1) |
18f2190d MJ |
327 | val |= CBE_PM_FREEZE_ALL_CTRS; |
328 | ||
329 | /* Routine set_count_mode must be called previously to set | |
330 | * the count mode based on the user selection of user and kernel. | |
331 | */ | |
bcb63e25 | 332 | val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode); |
18f2190d MJ |
333 | cbe_write_pm(cpu, pm_control, val); |
334 | } | |
335 | ||
336 | static inline void | |
bcb63e25 | 337 | set_count_mode(u32 kernel, u32 user) |
18f2190d MJ |
338 | { |
339 | /* The user must specify user and kernel if they want them. If | |
bcb63e25 CL |
340 | * neither is specified, OProfile will count in hypervisor mode. |
341 | * pm_regs.pm_cntrl is a global | |
18f2190d MJ |
342 | */ |
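/* Resulting mode selection, as implemented below:
 *   kernel=1 user=1 -> CBE_COUNT_ALL_MODES
 *   kernel=1 user=0 -> CBE_COUNT_SUPERVISOR_MODE
 *   kernel=0 user=1 -> CBE_COUNT_PROBLEM_MODE
 *   kernel=0 user=0 -> CBE_COUNT_HYPERVISOR_MODE
 */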
343 | if (kernel) { | |
344 | if (user) | |
bcb63e25 | 345 | pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES; |
18f2190d | 346 | else |
bcb63e25 CL |
347 | pm_regs.pm_cntrl.count_mode = |
348 | CBE_COUNT_SUPERVISOR_MODE; | |
18f2190d MJ |
349 | } else { |
350 | if (user) | |
bcb63e25 | 351 | pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE; |
18f2190d | 352 | else |
bcb63e25 CL |
353 | pm_regs.pm_cntrl.count_mode = |
354 | CBE_COUNT_HYPERVISOR_MODE; | |
18f2190d MJ |
355 | } |
356 | } | |
357 | ||
358 | static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl) | |
359 | { | |
360 | ||
bcb63e25 | 361 | pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE; |
18f2190d MJ |
362 | cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]); |
363 | } | |
364 | ||
365 | /* | |
366 | * Oprofile is expected to collect data on all CPUs simultaneously. | |
367 | * However, there is one set of performance counters per node. There are | |
368 | * two hardware threads or virtual CPUs on each node. Hence, OProfile must | |
369 | * multiplex in time the performance counter collection on the two virtual | |
370 | * CPUs. The multiplexing of the performance counters is done by this | |
371 | * virtual counter routine. | |
372 | * | |
373 | * The pmc_values used below is defined as 'per-cpu' but its use is | |
374 | * more akin to 'per-node'. We need to store two sets of counter | |
375 | * values per node -- one for the previous run and one for the next. | |
376 | * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even | |
377 | * pair of per-cpu arrays is used for storing the previous and next | |
378 | * pmc values for a given node. | |
379 | * NOTE: We use the per-cpu variable to improve cache performance. | |
380 | */ | |
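/* Illustration of the pairing (inferred from the indexing in the routine
 * below): for the hardware-thread pair cpu N (even) and cpu N+1 on one
 * node, per_cpu(pmc_values, N + prev_hdw_thread) receives the outgoing
 * thread's counter values and per_cpu(pmc_values, N + next_hdw_thread)
 * supplies the values to reload.
 */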
381 | static void cell_virtual_cntr(unsigned long data) | |
382 | { | |
383 | /* This routine will alternate loading the virtual counters for | |
384 | * virtual CPUs | |
385 | */ | |
386 | int i, prev_hdw_thread, next_hdw_thread; | |
387 | u32 cpu; | |
388 | unsigned long flags; | |
389 | ||
390 | /* Make sure that the interrupt handler and the |
391 | * virt counter routine are not both playing with |
392 | * the counters on the same node. |
393 | */ | |
394 | ||
395 | spin_lock_irqsave(&virt_cntr_lock, flags); | |
396 | ||
397 | prev_hdw_thread = hdw_thread; | |
398 | ||
399 | /* switch the cpu handling the interrupts */ | |
400 | hdw_thread = 1 ^ hdw_thread; | |
401 | next_hdw_thread = hdw_thread; | |
402 | ||
bcb63e25 CL |
403 | for (i = 0; i < num_counters; i++) |
404 | /* There are some per-thread events. Must do the |
405 | * set event for the thread that is being started. |
406 | */ | |
407 | set_pm_event(i, | |
408 | pmc_cntrl[next_hdw_thread][i].evnts, | |
409 | pmc_cntrl[next_hdw_thread][i].masks); | |
410 | ||
18f2190d MJ |
411 | /* The following is done only once per each node, but |
412 | * we need cpu #, not node #, to pass to the cbe_xxx functions. | |
413 | */ | |
414 | for_each_online_cpu(cpu) { | |
415 | if (cbe_get_hw_thread_id(cpu)) | |
416 | continue; | |
417 | ||
418 | /* stop counters, save counter values, restore counts | |
419 | * for previous thread | |
420 | */ | |
421 | cbe_disable_pm(cpu); | |
422 | cbe_disable_pm_interrupts(cpu); | |
423 | for (i = 0; i < num_counters; i++) { | |
424 | per_cpu(pmc_values, cpu + prev_hdw_thread)[i] | |
425 | = cbe_read_ctr(cpu, i); | |
426 | ||
427 | if (per_cpu(pmc_values, cpu + next_hdw_thread)[i] | |
428 | == 0xFFFFFFFF) | |
429 | /* If the cntr value is 0xffffffff, we must | |
430 | * reset that to 0xfffffff0 when the current | |
bcb63e25 CL |
431 | * thread is restarted. This will generate a |
432 | * new interrupt and make sure that we never | |
433 | * restore the counters to the max value. If | |
434 | * the counters were restored to the max value, | |
435 | * they do not increment and no interrupts are | |
436 | * generated. Hence no more samples will be | |
437 | * collected on that cpu. | |
18f2190d MJ |
438 | */ |
439 | cbe_write_ctr(cpu, i, 0xFFFFFFF0); | |
440 | else | |
441 | cbe_write_ctr(cpu, i, | |
442 | per_cpu(pmc_values, | |
443 | cpu + | |
444 | next_hdw_thread)[i]); | |
445 | } | |
446 | ||
447 | /* Switch to the other thread. Change the interrupt | |
448 | * and control regs to be scheduled on the CPU | |
449 | * corresponding to the thread to execute. | |
450 | */ | |
451 | for (i = 0; i < num_counters; i++) { | |
452 | if (pmc_cntrl[next_hdw_thread][i].enabled) { | |
453 | /* There are some per-thread events. |
454 | * Must do the set event, enable_ctr, |
455 | * for each cpu. |
456 | */ | |
18f2190d MJ |
457 | enable_ctr(cpu, i, |
458 | pm_regs.pm07_cntrl); | |
459 | } else { | |
460 | cbe_write_pm07_control(cpu, i, 0); | |
461 | } | |
462 | } | |
463 | ||
464 | /* Enable interrupts on the CPU thread that is starting */ | |
465 | cbe_enable_pm_interrupts(cpu, next_hdw_thread, | |
466 | virt_cntr_inter_mask); | |
467 | cbe_enable_pm(cpu); | |
468 | } | |
469 | ||
470 | spin_unlock_irqrestore(&virt_cntr_lock, flags); | |
471 | ||
472 | mod_timer(&timer_virt_cntr, jiffies + HZ / 10); | |
473 | } | |
474 | ||
475 | static void start_virt_cntrs(void) | |
476 | { | |
477 | init_timer(&timer_virt_cntr); | |
478 | timer_virt_cntr.function = cell_virtual_cntr; | |
479 | timer_virt_cntr.data = 0UL; | |
480 | timer_virt_cntr.expires = jiffies + HZ / 10; | |
481 | add_timer(&timer_virt_cntr); | |
482 | } | |
483 | ||
484 | /* This function is called once for all cpus combined */ | |
485 | static void | |
486 | cell_reg_setup(struct op_counter_config *ctr, | |
487 | struct op_system_config *sys, int num_ctrs) | |
488 | { | |
489 | int i, j, cpu; | |
490 | ||
491 | pm_rtas_token = rtas_token("ibm,cbe-perftools"); | |
492 | if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) { | |
493 | printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n", | |
494 | __FUNCTION__); | |
495 | goto out; | |
496 | } | |
497 | ||
498 | num_counters = num_ctrs; | |
499 | ||
500 | pm_regs.group_control = 0; | |
501 | pm_regs.debug_bus_control = 0; | |
502 | ||
503 | /* setup the pm_control register */ | |
504 | memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl)); | |
505 | pm_regs.pm_cntrl.stop_at_max = 1; | |
506 | pm_regs.pm_cntrl.trace_mode = 0; | |
507 | pm_regs.pm_cntrl.freeze = 1; | |
508 | ||
bcb63e25 | 509 | set_count_mode(sys->enable_kernel, sys->enable_user); |
18f2190d MJ |
510 | |
511 | /* Setup the thread 0 events */ | |
512 | for (i = 0; i < num_ctrs; ++i) { | |
513 | ||
514 | pmc_cntrl[0][i].evnts = ctr[i].event; | |
515 | pmc_cntrl[0][i].masks = ctr[i].unit_mask; | |
516 | pmc_cntrl[0][i].enabled = ctr[i].enabled; | |
517 | pmc_cntrl[0][i].vcntr = i; | |
518 | ||
519 | for_each_possible_cpu(j) | |
520 | per_cpu(pmc_values, j)[i] = 0; | |
521 | } | |
522 | ||
523 | /* Setup the thread 1 events, map the thread 0 event to the | |
524 | * equivalent thread 1 event. | |
525 | */ | |
526 | for (i = 0; i < num_ctrs; ++i) { | |
527 | if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111)) | |
528 | pmc_cntrl[1][i].evnts = ctr[i].event + 19; | |
529 | else if (ctr[i].event == 2203) | |
530 | pmc_cntrl[1][i].evnts = ctr[i].event; | |
531 | else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215)) | |
532 | pmc_cntrl[1][i].evnts = ctr[i].event + 16; | |
533 | else | |
534 | pmc_cntrl[1][i].evnts = ctr[i].event; | |
535 | ||
536 | pmc_cntrl[1][i].masks = ctr[i].unit_mask; | |
537 | pmc_cntrl[1][i].enabled = ctr[i].enabled; | |
538 | pmc_cntrl[1][i].vcntr = i; | |
539 | } | |
540 | ||
bcb63e25 | 541 | for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) |
18f2190d MJ |
542 | trace_bus[i] = 0xff; |
543 | ||
bcb63e25 | 544 | for (i = 0; i < NUM_INPUT_BUS_WORDS; i++) |
18f2190d MJ |
545 | input_bus[i] = 0xff; |
546 | ||
547 | /* Our counters count up and interrupt on overflow; "count" |
548 | * is how many events occur before the next interrupt. |
549 | * So we calculate the starting value |
550 | * which will give us "count" events until overflow. |
551 | * Then we set the events on the enabled counters. |
552 | */ | |
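/* Worked example (illustrative): for a requested count of 100000 events,
 * reset_value[i] = 0xFFFFFFFF - 100000 = 0xFFFE795F, so the 32-bit
 * counter starts there and overflows, raising an interrupt, after
 * roughly 100000 events.
 */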
553 | for (i = 0; i < num_counters; ++i) { | |
554 | /* start with virtual counter set 0 */ | |
555 | if (pmc_cntrl[0][i].enabled) { | |
556 | /* Using 32bit counters, reset max - count */ | |
557 | reset_value[i] = 0xFFFFFFFF - ctr[i].count; | |
558 | set_pm_event(i, | |
559 | pmc_cntrl[0][i].evnts, | |
560 | pmc_cntrl[0][i].masks); | |
561 | ||
562 | /* global, used by cell_cpu_setup */ | |
563 | ctr_enabled |= (1 << i); | |
564 | } | |
565 | } | |
566 | ||
567 | /* initialize the previous counts for the virtual cntrs */ | |
568 | for_each_online_cpu(cpu) | |
569 | for (i = 0; i < num_counters; ++i) { | |
570 | per_cpu(pmc_values, cpu)[i] = reset_value[i]; | |
571 | } | |
572 | out: | |
573 | ; | |
574 | } | |
575 | ||
576 | /* This function is called once for each cpu */ | |
577 | static void cell_cpu_setup(struct op_counter_config *cntr) | |
578 | { | |
579 | u32 cpu = smp_processor_id(); | |
580 | u32 num_enabled = 0; | |
581 | int i; | |
582 | ||
583 | /* There is one performance monitor per processor chip (i.e. node), | |
584 | * so we only need to perform this function once per node. | |
585 | */ | |
586 | if (cbe_get_hw_thread_id(cpu)) | |
587 | goto out; | |
588 | ||
589 | if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) { | |
590 | printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n", | |
591 | __FUNCTION__); | |
592 | goto out; | |
593 | } | |
594 | ||
595 | /* Stop all counters */ | |
596 | cbe_disable_pm(cpu); | |
597 | cbe_disable_pm_interrupts(cpu); | |
598 | ||
599 | cbe_write_pm(cpu, pm_interval, 0); | |
600 | cbe_write_pm(cpu, pm_start_stop, 0); | |
601 | cbe_write_pm(cpu, group_control, pm_regs.group_control); | |
602 | cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control); | |
bcb63e25 | 603 | write_pm_cntrl(cpu); |
18f2190d MJ |
604 | |
605 | for (i = 0; i < num_counters; ++i) { | |
606 | if (ctr_enabled & (1 << i)) { | |
607 | pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu); | |
608 | num_enabled++; | |
609 | } | |
610 | } | |
611 | ||
612 | pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled); | |
613 | out: | |
614 | ; | |
615 | } | |
616 | ||
617 | static void cell_global_start(struct op_counter_config *ctr) | |
618 | { | |
619 | u32 cpu; | |
620 | u32 interrupt_mask = 0; | |
621 | u32 i; | |
622 | ||
623 | /* This routine gets called once for the system. | |
624 | * There is one performance monitor per node, so we | |
625 | * only need to perform this function once per node. | |
626 | */ | |
627 | for_each_online_cpu(cpu) { | |
628 | if (cbe_get_hw_thread_id(cpu)) | |
629 | continue; | |
630 | ||
631 | interrupt_mask = 0; | |
632 | ||
633 | for (i = 0; i < num_counters; ++i) { | |
634 | if (ctr_enabled & (1 << i)) { | |
635 | cbe_write_ctr(cpu, i, reset_value[i]); | |
636 | enable_ctr(cpu, i, pm_regs.pm07_cntrl); | |
637 | interrupt_mask |= | |
638 | CBE_PM_CTR_OVERFLOW_INTR(i); | |
639 | } else { | |
640 | /* Disable counter */ | |
641 | cbe_write_pm07_control(cpu, i, 0); | |
642 | } | |
643 | } | |
644 | ||
bcb63e25 | 645 | cbe_get_and_clear_pm_interrupts(cpu); |
18f2190d MJ |
646 | cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask); |
647 | cbe_enable_pm(cpu); | |
648 | } | |
649 | ||
650 | virt_cntr_inter_mask = interrupt_mask; | |
651 | oprofile_running = 1; | |
652 | smp_wmb(); | |
653 | ||
654 | /* NOTE: start_virt_cntrs will result in cell_virtual_cntr() being | |
655 | * executed which manipulates the PMU. We start the "virtual counter" | |
656 | * here so that we do not need to synchronize access to the PMU in | |
657 | * the above for-loop. | |
658 | */ | |
659 | start_virt_cntrs(); | |
660 | } | |
661 | ||
662 | static void cell_global_stop(void) | |
663 | { | |
664 | int cpu; | |
665 | ||
666 | /* This routine will be called once for the system. | |
667 | * There is one performance monitor per node, so we | |
668 | * only need to perform this function once per node. | |
669 | */ | |
670 | del_timer_sync(&timer_virt_cntr); | |
671 | oprofile_running = 0; | |
672 | smp_wmb(); | |
673 | ||
674 | for_each_online_cpu(cpu) { | |
675 | if (cbe_get_hw_thread_id(cpu)) | |
676 | continue; | |
677 | ||
678 | cbe_sync_irq(cbe_cpu_to_node(cpu)); | |
679 | /* Stop the counters */ | |
680 | cbe_disable_pm(cpu); | |
681 | ||
682 | /* Deactivate the signals */ | |
683 | pm_rtas_reset_signals(cbe_cpu_to_node(cpu)); | |
684 | ||
685 | /* Deactivate interrupts */ | |
686 | cbe_disable_pm_interrupts(cpu); | |
687 | } | |
688 | } | |
689 | ||
690 | static void | |
691 | cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) | |
692 | { | |
693 | u32 cpu; | |
694 | u64 pc; | |
695 | int is_kernel; | |
696 | unsigned long flags = 0; | |
697 | u32 interrupt_mask; | |
698 | int i; | |
699 | ||
700 | cpu = smp_processor_id(); | |
701 | ||
702 | /* Need to make sure the interrupt handler and the virt counter | |
703 | * routine are not running at the same time. See the | |
704 | * cell_virtual_cntr() routine for additional comments. | |
705 | */ | |
706 | spin_lock_irqsave(&virt_cntr_lock, flags); | |
707 | ||
708 | /* Need to disable and reenable the performance counters | |
709 | * to get the desired behavior from the hardware. This | |
710 | * is hardware specific. | |
711 | */ | |
712 | ||
713 | cbe_disable_pm(cpu); | |
714 | ||
bcb63e25 | 715 | interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu); |
18f2190d MJ |
716 | |
717 | /* If the interrupt mask has been cleared, then the virt cntr | |
718 | * has cleared the interrupt. When the thread that generated | |
719 | * the interrupt is restored, the data count will be restored to | |
720 | * 0xFFFFFFF0 to cause the interrupt to be regenerated. |
721 | */ | |
722 | ||
723 | if ((oprofile_running == 1) && (interrupt_mask != 0)) { | |
724 | pc = regs->nip; | |
725 | is_kernel = is_kernel_addr(pc); | |
726 | ||
727 | for (i = 0; i < num_counters; ++i) { | |
728 | if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i)) | |
729 | && ctr[i].enabled) { | |
730 | oprofile_add_pc(pc, is_kernel, i); | |
731 | cbe_write_ctr(cpu, i, reset_value[i]); | |
732 | } | |
733 | } | |
734 | ||
735 | /* The counters were frozen by the interrupt. | |
736 | * Reenable the interrupt and restart the counters. | |
737 | * If there was a race between the interrupt handler and |
738 | * the virtual counter routine, the virtual counter |
739 | * routine may have cleared the interrupts. Hence we must |
740 | * use virt_cntr_inter_mask to re-enable the interrupts. |
741 | */ | |
742 | cbe_enable_pm_interrupts(cpu, hdw_thread, | |
743 | virt_cntr_inter_mask); | |
744 | ||
745 | /* The writes to the various performance counters only write |
746 | * to a latch. The new values (interrupt setting bits, reset | |
747 | * counter value etc.) are not copied to the actual registers | |
748 | * until the performance monitor is enabled. In order to get | |
749 | * this to work as desired, the performance monitor needs to |
beb7dd86 | 750 | * be disabled while writing to the latches. This is a |
18f2190d MJ |
751 | * HW design issue. |
752 | */ | |
753 | cbe_enable_pm(cpu); | |
754 | } | |
755 | spin_unlock_irqrestore(&virt_cntr_lock, flags); | |
756 | } | |
757 | ||
758 | struct op_powerpc_model op_model_cell = { | |
759 | .reg_setup = cell_reg_setup, | |
760 | .cpu_setup = cell_cpu_setup, | |
761 | .global_start = cell_global_start, | |
762 | .global_stop = cell_global_stop, | |
763 | .handle_interrupt = cell_handle_interrupt, | |
764 | }; |