/*
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/systemcfg.h>
#include <asm/rtas.h>
#include <asm/oprofile_impl.h>

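/*
 * dbg() is compiled out by default; redefining it to printk(args)
 * turns on the register-state traces below.
 */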
#define dbg(args...)

static unsigned long reset_value[OP_MAX_COUNTER];

static int oprofile_running;
static int mmcra_has_sihv;

/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
static u32 mmcr0_val;
static u64 mmcr1_val;
static u32 mmcra_val;

/*
 * Since we do not have an NMI, backtracing through spinlocks is
 * only a best guess. In light of this, allow it to be disabled at
 * runtime.
 */
static int backtrace_spinlocks;

static void power4_reg_setup(struct op_counter_config *ctr,
                             struct op_system_config *sys,
                             int num_ctrs)
{
        int i;

        /*
         * SIHV / SIPR bits are only implemented on POWER4+ (GQ) and above.
         * However we disable them on all POWER4 until we verify they work
         * (I was seeing some strange behaviour last time I tried).
         *
         * They have been verified to work on POWER5, so we enable them there.
         */
        if (cpu_has_feature(CPU_FTR_MMCRA_SIHV))
                mmcra_has_sihv = 1;

        /*
         * The performance counter event settings are given in the mmcr0,
         * mmcr1 and mmcra values passed from the user in the
         * op_system_config structure (sys variable).
         */
        mmcr0_val = sys->mmcr0;
        mmcr1_val = sys->mmcr1;
        mmcra_val = sys->mmcra;

        backtrace_spinlocks = sys->backtrace_spinlocks;

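        /*
         * The PMCs are 32 bits wide and raise a performance monitor
         * exception when a counter goes negative (bit 0x80000000 set),
         * so a preload of 0x80000000UL - count overflows after exactly
         * "count" events.
         */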
        for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
                reset_value[i] = 0x80000000UL - ctr[i].count;

        /* set up user and kernel profiling */
        if (sys->enable_kernel)
                mmcr0_val &= ~MMCR0_KERNEL_DISABLE;
        else
                mmcr0_val |= MMCR0_KERNEL_DISABLE;

        if (sys->enable_user)
                mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
        else
                mmcr0_val |= MMCR0_PROBLEM_DISABLE;
}

extern void ppc64_enable_pmcs(void);

static void power4_cpu_setup(void *unused)
{
        unsigned int mmcr0 = mmcr0_val;
        unsigned long mmcra = mmcra_val;

        ppc64_enable_pmcs();

        /* set the freeze bit */
        mmcr0 |= MMCR0_FC;
        mtspr(SPRN_MMCR0, mmcr0);

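        /*
         * FCM1 freezes the counters while MSR[PMM] is set, PMXE enables
         * the performance monitor exception, FCECE freezes the counters
         * on an enabled condition or event, and PMC1CE/PMCjCE enable the
         * overflow condition on PMC1 and on the remaining PMCs
         * respectively.
         */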
        mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
        mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
        mtspr(SPRN_MMCR0, mmcr0);

        mtspr(SPRN_MMCR1, mmcr1_val);

        mmcra |= MMCRA_SAMPLE_ENABLE;
        mtspr(SPRN_MMCRA, mmcra);

        dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
            mfspr(SPRN_MMCR0));
        dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
            mfspr(SPRN_MMCR1));
        dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(),
            mfspr(SPRN_MMCRA));
}

static void power4_start(struct op_counter_config *ctr)
{
        int i;
        unsigned int mmcr0;

        /* set the PMM bit (see comment below) */
        mtmsrd(mfmsr() | MSR_PMM);

        for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
                if (ctr[i].enabled) {
                        ctr_write(i, reset_value[i]);
                } else {
                        ctr_write(i, 0);
                }
        }

        mmcr0 = mfspr(SPRN_MMCR0);

        /*
         * We must clear the PMAO bit on some (GQ) chips. Just do it
         * all the time
         */
        mmcr0 &= ~MMCR0_PMAO;

        /*
         * now clear the freeze bit, counting will not start until we
         * rfid from this exception, because only at that point will
         * the PMM bit be cleared
         */
        mmcr0 &= ~MMCR0_FC;
        mtspr(SPRN_MMCR0, mmcr0);

        oprofile_running = 1;

        dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
}

static void power4_stop(void)
{
        unsigned int mmcr0;

        /* freeze counters */
        mmcr0 = mfspr(SPRN_MMCR0);
        mmcr0 |= MMCR0_FC;
        mtspr(SPRN_MMCR0, mmcr0);

        oprofile_running = 0;

        dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);

        mb();
}

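/*
 * On ppc64 a function pointer points at a function descriptor whose
 * first word is the entry address, so get_pc() dereferences the
 * buckets below ("function descriptor madness") to obtain real text
 * addresses that otherwise unattributable samples can be binned
 * against.
 */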
/* Fake functions used by get_pc */
static void __attribute_used__ hypervisor_bucket(void)
{
}

static void __attribute_used__ rtas_bucket(void)
{
}

static void __attribute_used__ kernel_unknown_bucket(void)
{
}

static unsigned long check_spinlock_pc(struct pt_regs *regs,
                                       unsigned long profile_pc)
{
        unsigned long pc = instruction_pointer(regs);

        /*
         * If both the SIAR (sampled instruction) and the perfmon exception
         * occurred in a spinlock region then we account the sample to the
         * calling function. This isn't 100% correct, we really need soft
         * IRQ disable so we always get the perfmon exception at the
         * point at which the SIAR is set.
         */
        if (backtrace_spinlocks && in_lock_functions(pc) &&
            in_lock_functions(profile_pc))
                return regs->link;
        else
                return profile_pc;
}

/*
 * On GQ and newer the MMCRA stores the HV and PR bits at the time
 * the SIAR was sampled. We use that to work out if the SIAR was sampled in
 * the hypervisor, our exception vectors or RTAS.
 */
static unsigned long get_pc(struct pt_regs *regs)
{
        unsigned long pc = mfspr(SPRN_SIAR);
        unsigned long mmcra;

        /* Can't do much about it */
        if (!mmcra_has_sihv)
                return check_spinlock_pc(regs, pc);

        mmcra = mfspr(SPRN_MMCRA);

        /* Were we in the hypervisor? */
        if ((systemcfg->platform == PLATFORM_PSERIES_LPAR) &&
            (mmcra & MMCRA_SIHV))
                /* function descriptor madness */
                return *((unsigned long *)hypervisor_bucket);

        /* We were in userspace, nothing to do */
        if (mmcra & MMCRA_SIPR)
                return pc;

#ifdef CONFIG_PPC_RTAS
        /* Were we in RTAS? */
        if (pc >= rtas.base && pc < (rtas.base + rtas.size))
                /* function descriptor madness */
                return *((unsigned long *)rtas_bucket);
#endif

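        /*
         * Real-mode samples leave a real (physical) address in the
         * SIAR; __va() translates it back into the kernel linear
         * mapping so the sample resolves against kernel symbols.
         */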
        /* Were we in our exception vectors or SLB real mode miss handler? */
        if (pc < 0x1000000UL)
                return (unsigned long)__va(pc);

        /* Not sure where we were */
        if (pc < KERNELBASE)
                /* function descriptor madness */
                return *((unsigned long *)kernel_unknown_bucket);

        return check_spinlock_pc(regs, pc);
}

static int get_kernel(unsigned long pc)
{
        int is_kernel;

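        /*
         * Without usable SIHV/SIPR bits we fall back on comparing the
         * sampled address against KERNELBASE; with them, MMCRA_SIPR set
         * means the sample was taken in problem (user) state.
         */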
        if (!mmcra_has_sihv) {
                is_kernel = (pc >= KERNELBASE);
        } else {
                unsigned long mmcra = mfspr(SPRN_MMCRA);
                is_kernel = ((mmcra & MMCRA_SIPR) == 0);
        }

        return is_kernel;
}

static void power4_handle_interrupt(struct pt_regs *regs,
                                    struct op_counter_config *ctr)
{
        unsigned long pc;
        int is_kernel;
        int val;
        int i;
        unsigned int mmcr0;

        pc = get_pc(regs);
        is_kernel = get_kernel(pc);

        /* set the PMM bit (see comment below) */
        mtmsrd(mfmsr() | MSR_PMM);

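        /*
         * Any counter that has gone negative has overflowed and is the
         * reason we are here: log the sample and rearm the counter with
         * its reset value; disabled counters are parked at zero.
         */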
        for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
                val = ctr_read(i);
                if (val < 0) {
                        if (oprofile_running && ctr[i].enabled) {
                                oprofile_add_pc(pc, is_kernel, i);
                                ctr_write(i, reset_value[i]);
                        } else {
                                ctr_write(i, 0);
                        }
                }
        }

        mmcr0 = mfspr(SPRN_MMCR0);

        /* reset the perfmon trigger */
        mmcr0 |= MMCR0_PMXE;

        /*
         * We must clear the PMAO bit on some (GQ) chips. Just do it
         * all the time
         */
        mmcr0 &= ~MMCR0_PMAO;

        /*
         * now clear the freeze bit, counting will not start until we
         * rfid from this exception, because only at that point will
         * the PMM bit be cleared
         */
        mmcr0 &= ~MMCR0_FC;
        mtspr(SPRN_MMCR0, mmcr0);
}

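/*
 * Entry points handed to the common oprofile layer: reg_setup runs
 * once with the user's configuration, cpu_setup and start are invoked
 * on each CPU, and handle_interrupt is called from the performance
 * monitor exception.
 */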
struct op_powerpc_model op_model_power4 = {
        .reg_setup = power4_reg_setup,
        .cpu_setup = power4_cpu_setup,
        .start = power4_start,
        .stop = power4_stop,
        .handle_interrupt = power4_handle_interrupt,
};