#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "perf_event.h"

enum {
        LBR_FORMAT_32           = 0x00,
        LBR_FORMAT_LIP          = 0x01,
        LBR_FORMAT_EIP          = 0x02,
        LBR_FORMAT_EIP_FLAGS    = 0x03,
        LBR_FORMAT_EIP_FLAGS2   = 0x04,
        LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_EIP_FLAGS2,
};

static enum {
        LBR_EIP_FLAGS           = 1,
        LBR_TSX                 = 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
        [LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
        [LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};

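/*
 * Illustration: lbr_desc[] is indexed by the LBR format reported in
 * MSR_IA32_PERF_CAPABILITIES (x86_pmu.intel_cap.lbr_format). On a CPU
 * reporting LBR_FORMAT_EIP_FLAGS2, lbr_desc[0x04] is
 * LBR_EIP_FLAGS | LBR_TSX, so intel_pmu_lbr_read_64() below decodes both
 * the mispredict flag and the TSX flags from the FROM value.
 */
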
/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT          0 /* do not capture at ring0 */
#define LBR_USER_BIT            1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT             2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT        3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT        4 /* do not capture indirect calls */
#define LBR_RETURN_BIT          5 /* do not capture near returns */
#define LBR_IND_JMP_BIT         6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT         7 /* do not capture relative jumps */
#define LBR_FAR_BIT             8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT      9 /* enable call stack */

#define LBR_KERNEL      (1 << LBR_KERNEL_BIT)
#define LBR_USER        (1 << LBR_USER_BIT)
#define LBR_JCC         (1 << LBR_JCC_BIT)
#define LBR_REL_CALL    (1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL    (1 << LBR_IND_CALL_BIT)
#define LBR_RETURN      (1 << LBR_RETURN_BIT)
#define LBR_REL_JMP     (1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP     (1 << LBR_IND_JMP_BIT)
#define LBR_FAR         (1 << LBR_FAR_BIT)
#define LBR_CALL_STACK  (1 << LBR_CALL_STACK_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK    0x1ff   /* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP    -1      /* LBR filter not supported */
#define LBR_IGN         0       /* ignored */

#define LBR_ANY          \
        (LBR_JCC        |\
         LBR_REL_CALL   |\
         LBR_IND_CALL   |\
         LBR_RETURN     |\
         LBR_REL_JMP    |\
         LBR_IND_JMP    |\
         LBR_FAR)

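/*
 * Bits 0-8 of MSR_LBR_SELECT operate in suppress mode: a set bit means
 * "do not capture". E.g. writing just LBR_KERNEL suppresses ring-0
 * branches and captures everything else, which is why
 * intel_pmu_setup_hw_lbr_filter() below inverts the requested capture
 * mask within LBR_SEL_MASK before programming the MSR.
 */
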
#define LBR_FROM_FLAG_MISPRED  (1ULL << 63)
#define LBR_FROM_FLAG_IN_TX    (1ULL << 62)
#define LBR_FROM_FLAG_ABORT    (1ULL << 61)

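/*
 * On LBR formats with flags, the top bits of the FROM MSR carry these
 * flags rather than address bits: MISPRED in bit 63 and, with TSX, IN_TX
 * and ABORT in bits 62/61. intel_pmu_lbr_read_64() strips them with a
 * sign-extending shift pair so that canonical kernel addresses (upper
 * bits all ones) survive the flag removal.
 */
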
/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
        X86_BR_NONE       = 0,      /* unknown */

        X86_BR_USER       = 1 << 0, /* branch target is user */
        X86_BR_KERNEL     = 1 << 1, /* branch target is kernel */

        X86_BR_CALL       = 1 << 2, /* call */
        X86_BR_RET        = 1 << 3, /* return */
        X86_BR_SYSCALL    = 1 << 4, /* syscall */
        X86_BR_SYSRET     = 1 << 5, /* syscall return */
        X86_BR_INT        = 1 << 6, /* sw interrupt */
        X86_BR_IRET       = 1 << 7, /* return from interrupt */
        X86_BR_JCC        = 1 << 8, /* conditional */
        X86_BR_JMP        = 1 << 9, /* jump */
        X86_BR_IRQ        = 1 << 10,/* hw interrupt or trap or fault */
        X86_BR_IND_CALL   = 1 << 11,/* indirect calls */
        X86_BR_ABORT      = 1 << 12,/* transaction abort */
        X86_BR_IN_TX      = 1 << 13,/* in transaction */
        X86_BR_NO_TX      = 1 << 14,/* not in transaction */
        X86_BR_CALL_STACK = 1 << 15,/* call stack */
};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY              \
        (X86_BR_CALL           |\
         X86_BR_RET            |\
         X86_BR_SYSCALL        |\
         X86_BR_SYSRET         |\
         X86_BR_INT            |\
         X86_BR_IRET           |\
         X86_BR_JCC            |\
         X86_BR_JMP            |\
         X86_BR_IRQ            |\
         X86_BR_ABORT          |\
         X86_BR_IND_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL         \
        (X86_BR_CALL           |\
         X86_BR_IND_CALL       |\
         X86_BR_SYSCALL        |\
         X86_BR_IRQ            |\
         X86_BR_INT)
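
/*
 * A fully classified branch combines one priv-level bit with one type
 * bit, plus an optional TSX annotation. For example, a user-level
 * conditional branch is reported as X86_BR_JCC | X86_BR_USER, and inside
 * a transaction it additionally carries X86_BR_IN_TX.
 */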

static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
 * otherwise it becomes near impossible to get a reliable stack.
 */

static void __intel_pmu_lbr_enable(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        u64 debugctl, lbr_select = 0;

        if (cpuc->lbr_sel) {
                lbr_select = cpuc->lbr_sel->config;
                wrmsrl(MSR_LBR_SELECT, lbr_select);
        }

        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        debugctl |= DEBUGCTLMSR_LBR;
        /*
         * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
         * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
         * may cause superfluous increase/decrease of LBR_TOS.
         */
        if (!(lbr_select & LBR_CALL_STACK))
                debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
        u64 debugctl;

        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++)
                wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                wrmsrl(x86_pmu.lbr_from + i, 0);
                wrmsrl(x86_pmu.lbr_to + i, 0);
        }
}

void intel_pmu_lbr_reset(void)
{
        if (!x86_pmu.lbr_nr)
                return;

        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
                intel_pmu_lbr_reset_32();
        else
                intel_pmu_lbr_reset_64();
}

/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
        u64 tos;

        rdmsrl(x86_pmu.lbr_tos, tos);
        return tos;
}

enum {
        LBR_NONE,
        LBR_VALID,
};

static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
        int i;
        unsigned lbr_idx, mask;
        u64 tos;

        if (task_ctx->lbr_callstack_users == 0 ||
            task_ctx->lbr_stack_state == LBR_NONE) {
                intel_pmu_lbr_reset();
                return;
        }

        mask = x86_pmu.lbr_nr - 1;
        tos = intel_pmu_lbr_tos();
        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                lbr_idx = (tos - i) & mask;
                wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
                wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
        }
        task_ctx->lbr_stack_state = LBR_NONE;
}
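
/*
 * Worked example of the ring-buffer index math above: the LBR MSRs form
 * a circular buffer of x86_pmu.lbr_nr entries (always a power of two),
 * walked from the top-of-stack downwards. With lbr_nr == 16 and
 * tos == 3, i == 0 accesses MSR slot 3, i == 1 slot 2, and i == 4 wraps
 * around since (3 - 4) & 15 == 15.
 */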

static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
        int i;
        unsigned lbr_idx, mask;
        u64 tos;

        if (task_ctx->lbr_callstack_users == 0) {
                task_ctx->lbr_stack_state = LBR_NONE;
                return;
        }

        mask = x86_pmu.lbr_nr - 1;
        tos = intel_pmu_lbr_tos();
        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                lbr_idx = (tos - i) & mask;
                rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
                rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
        }
        task_ctx->lbr_stack_state = LBR_VALID;
}

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct x86_perf_task_context *task_ctx;

        if (!x86_pmu.lbr_nr)
                return;

        /*
         * If LBR callstack feature is enabled and the stack was saved when
         * the task was scheduled out, restore the stack. Otherwise flush
         * the LBR stack.
         */
        task_ctx = ctx ? ctx->task_ctx_data : NULL;
        if (task_ctx) {
                if (sched_in) {
                        __intel_pmu_lbr_restore(task_ctx);
                        cpuc->lbr_context = ctx;
                } else {
                        __intel_pmu_lbr_save(task_ctx);
                }
                return;
        }

        /*
         * When sampling the branch stack in system-wide mode, it may be
         * necessary to flush the stack on context switch. This happens
         * when the branch stack does not tag its entries with the pid
         * of the current task. Otherwise it becomes impossible to
         * associate a branch entry with a task. This ambiguity is more
         * likely to appear when the branch stack supports priv level
         * filtering and the user sets it to monitor only at the user
         * level (which could be a useful measurement in system-wide
         * mode). In that case, the risk is high of having a branch
         * stack with branches from multiple tasks.
         */
        if (sched_in) {
                intel_pmu_lbr_reset();
                cpuc->lbr_context = ctx;
        }
}

static inline bool branch_user_callstack(unsigned br_sel)
{
        return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}

void intel_pmu_lbr_enable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct x86_perf_task_context *task_ctx;

        if (!x86_pmu.lbr_nr)
                return;

        /*
         * Reset the LBR stack if we changed task context to
         * avoid data leaks.
         */
        if (event->ctx->task && cpuc->lbr_context != event->ctx) {
                intel_pmu_lbr_reset();
                cpuc->lbr_context = event->ctx;
        }
        cpuc->br_sel = event->hw.branch_reg.reg;

        if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
            event->ctx->task_ctx_data) {
                task_ctx = event->ctx->task_ctx_data;
                task_ctx->lbr_callstack_users++;
        }

        cpuc->lbr_users++;
        perf_sched_cb_inc(event->ctx->pmu);
}

void intel_pmu_lbr_disable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct x86_perf_task_context *task_ctx;

        if (!x86_pmu.lbr_nr)
                return;

        if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
            event->ctx->task_ctx_data) {
                task_ctx = event->ctx->task_ctx_data;
                task_ctx->lbr_callstack_users--;
        }

        cpuc->lbr_users--;
        WARN_ON_ONCE(cpuc->lbr_users < 0);
        perf_sched_cb_dec(event->ctx->pmu);

        if (cpuc->enabled && !cpuc->lbr_users) {
                __intel_pmu_lbr_disable();
                /* avoid stale pointer */
                cpuc->lbr_context = NULL;
        }
}

void intel_pmu_lbr_enable_all(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (cpuc->lbr_users)
                __intel_pmu_lbr_enable();
}

void intel_pmu_lbr_disable_all(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (cpuc->lbr_users)
                __intel_pmu_lbr_disable();
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
        unsigned long mask = x86_pmu.lbr_nr - 1;
        u64 tos = intel_pmu_lbr_tos();
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                unsigned long lbr_idx = (tos - i) & mask;
                union {
                        struct {
                                u32 from;
                                u32 to;
                        };
                        u64     lbr;
                } msr_lastbranch;

                rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

                cpuc->lbr_entries[i].from      = msr_lastbranch.from;
                cpuc->lbr_entries[i].to        = msr_lastbranch.to;
                cpuc->lbr_entries[i].mispred   = 0;
                cpuc->lbr_entries[i].predicted = 0;
                cpuc->lbr_entries[i].reserved  = 0;
        }
        cpuc->lbr_stack.nr = i;
}
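
/*
 * Note on the union above: on LBR_FORMAT_32 parts a single 64-bit MSR
 * packs the FROM address in the low 32 bits and the TO address in the
 * high 32 bits; the anonymous struct unpacks both with one rdmsrl().
 */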

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
        unsigned long mask = x86_pmu.lbr_nr - 1;
        int lbr_format = x86_pmu.intel_cap.lbr_format;
        u64 tos = intel_pmu_lbr_tos();
        int i;
        int out = 0;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                unsigned long lbr_idx = (tos - i) & mask;
                u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
                int skip = 0;
                int lbr_flags = lbr_desc[lbr_format];

                rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
                rdmsrl(x86_pmu.lbr_to + lbr_idx, to);

                if (lbr_flags & LBR_EIP_FLAGS) {
                        mis = !!(from & LBR_FROM_FLAG_MISPRED);
                        pred = !mis;
                        skip = 1;
                }
                if (lbr_flags & LBR_TSX) {
                        in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
                        abort = !!(from & LBR_FROM_FLAG_ABORT);
                        skip = 3;
                }
                from = (u64)((((s64)from) << skip) >> skip);

                /*
                 * Some CPUs report duplicated abort records,
                 * with the second entry not having an abort bit set.
                 * Skip them here. This loop runs backwards,
                 * so we need to undo the previous record.
                 * If the abort just happened outside the window
                 * the extra entry cannot be removed.
                 */
                if (abort && x86_pmu.lbr_double_abort && out > 0)
                        out--;

                cpuc->lbr_entries[out].from      = from;
                cpuc->lbr_entries[out].to        = to;
                cpuc->lbr_entries[out].mispred   = mis;
                cpuc->lbr_entries[out].predicted = pred;
                cpuc->lbr_entries[out].in_tx     = in_tx;
                cpuc->lbr_entries[out].abort     = abort;
                cpuc->lbr_entries[out].reserved  = 0;
                out++;
        }
        cpuc->lbr_stack.nr = out;
}
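
/*
 * Worked example of the flag stripping above, for LBR_FORMAT_EIP_FLAGS2
 * (skip == 3) and an illustrative raw FROM value of 0xa000000000401000:
 * bit 63 (MISPRED) and bit 61 (ABORT) are set, so mis = 1, abort = 1,
 * and the sign-extending shift pair yields the plain address 0x401000.
 * For a kernel address such as 0xffffffff81000000 the arithmetic right
 * shift re-extends bit 60 upwards, keeping the address canonical.
 */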

void intel_pmu_lbr_read(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (!cpuc->lbr_users)
                return;

        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
                intel_pmu_lbr_read_32(cpuc);
        else
                intel_pmu_lbr_read_64(cpuc);

        intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
        u64 br_type = event->attr.branch_sample_type;
        int mask = 0;

        if (br_type & PERF_SAMPLE_BRANCH_USER)
                mask |= X86_BR_USER;

        if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
                mask |= X86_BR_KERNEL;

        /* we ignore BRANCH_HV here */

        if (br_type & PERF_SAMPLE_BRANCH_ANY)
                mask |= X86_BR_ANY;

        if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
                mask |= X86_BR_ANY_CALL;

        if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
                mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

        if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
                mask |= X86_BR_IND_CALL;

        if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
                mask |= X86_BR_ABORT;

        if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
                mask |= X86_BR_IN_TX;

        if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
                mask |= X86_BR_NO_TX;

        if (br_type & PERF_SAMPLE_BRANCH_COND)
                mask |= X86_BR_JCC;

        if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
                if (!x86_pmu_has_lbr_callstack())
                        return -EOPNOTSUPP;
                if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
                        return -EINVAL;
                mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
                        X86_BR_CALL_STACK;
        }

        /*
         * stash the actual user request into reg, it may
         * be used by fixup code for some CPUs
         */
        event->hw.branch_reg.reg = mask;
        return 0;
}

/*
 * setup the HW LBR filter
 * Used only when available, may not be enough to disambiguate
 * all branches, may need the help of the SW filter
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
        struct hw_perf_event_extra *reg;
        u64 br_type = event->attr.branch_sample_type;
        u64 mask = 0, v;
        int i;

        for (i = 0; i < PERF_SAMPLE_BRANCH_SELECT_MAP_SIZE; i++) {
                if (!(br_type & (1ULL << i)))
                        continue;

                v = x86_pmu.lbr_sel_map[i];
                if (v == LBR_NOT_SUPP)
                        return -EOPNOTSUPP;

                if (v != LBR_IGN)
                        mask |= v;
        }
        reg = &event->hw.branch_reg;
        reg->idx = EXTRA_REG_LBR;

        /*
         * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
         * in suppress mode. So LBR_SELECT should be set to
         * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
         */
        reg->config = mask ^ x86_pmu.lbr_sel_mask;

        return 0;
}
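
/*
 * Worked example of the XOR above, assuming the snb_lbr_sel_map below:
 * PERF_SAMPLE_BRANCH_ANY_CALL maps to LBR_REL_CALL | LBR_IND_CALL |
 * LBR_FAR, i.e. mask == 0x118, and reg->config = 0x118 ^ 0x1ff == 0x0e7,
 * which sets the suppress bits of every *other* branch class while
 * leaving the call-related bits clear, so only calls are captured.
 * Bits outside LBR_SEL_MASK, such as LBR_CALL_STACK (bit 9), pass
 * through the XOR unchanged.
 */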

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
        int ret = 0;

        /*
         * no LBR on this PMU
         */
        if (!x86_pmu.lbr_nr)
                return -EOPNOTSUPP;

        /*
         * setup SW LBR filter
         */
        ret = intel_pmu_setup_sw_lbr_filter(event);
        if (ret)
                return ret;

        /*
         * setup HW LBR filter, if any
         */
        if (x86_pmu.lbr_sel_map)
                ret = intel_pmu_setup_hw_lbr_filter(event);

        return ret;
}

/*
 * return the type of control flow change at address "from"
 * the instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
        struct insn insn;
        void *addr;
        int bytes_read, bytes_left;
        int ret = X86_BR_NONE;
        int ext, to_plm, from_plm;
        u8 buf[MAX_INSN_SIZE];
        int is64 = 0;

        to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
        from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

        /*
         * maybe zero if lbr did not fill up after a reset by the time
         * we get a PMU interrupt
         */
        if (from == 0 || to == 0)
                return X86_BR_NONE;

        if (abort)
                return X86_BR_ABORT | to_plm;

        if (from_plm == X86_BR_USER) {
                /*
                 * can happen if measuring at the user level only
                 * and we interrupt in a kernel thread, e.g., idle.
                 */
                if (!current->mm)
                        return X86_BR_NONE;

                /* may fail if text not present */
                bytes_left = copy_from_user_nmi(buf, (void __user *)from,
                                                MAX_INSN_SIZE);
                bytes_read = MAX_INSN_SIZE - bytes_left;
                if (!bytes_read)
                        return X86_BR_NONE;

                addr = buf;
        } else {
                /*
                 * The LBR logs any address in the IP, even if the IP just
                 * faulted. This means userspace can control the from address.
                 * Ensure we don't blindly read any address by validating it is
                 * a known text address.
                 */
                if (kernel_text_address(from)) {
                        addr = (void *)from;
                        /*
                         * Assume we can get the maximum possible size
                         * when grabbing kernel data. This is not
                         * _strictly_ true since we could possibly be
                         * executing up next to a memory hole, but
                         * it is very unlikely to be a problem.
                         */
                        bytes_read = MAX_INSN_SIZE;
                } else {
                        return X86_BR_NONE;
                }
        }

        /*
         * decoder needs to know the ABI especially
         * on 64-bit systems running 32-bit apps
         */
#ifdef CONFIG_X86_64
        is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
        insn_init(&insn, addr, bytes_read, is64);
        insn_get_opcode(&insn);
        if (!insn.opcode.got)
                return X86_BR_ABORT;

        switch (insn.opcode.bytes[0]) {
        case 0xf:
                switch (insn.opcode.bytes[1]) {
                case 0x05: /* syscall */
                case 0x34: /* sysenter */
                        ret = X86_BR_SYSCALL;
                        break;
                case 0x07: /* sysret */
                case 0x35: /* sysexit */
                        ret = X86_BR_SYSRET;
                        break;
                case 0x80 ... 0x8f: /* conditional */
                        ret = X86_BR_JCC;
                        break;
                default:
                        ret = X86_BR_NONE;
                }
                break;
        case 0x70 ... 0x7f: /* conditional */
                ret = X86_BR_JCC;
                break;
        case 0xc2: /* near ret */
        case 0xc3: /* near ret */
        case 0xca: /* far ret */
        case 0xcb: /* far ret */
                ret = X86_BR_RET;
                break;
        case 0xcf: /* iret */
                ret = X86_BR_IRET;
                break;
        case 0xcc ... 0xce: /* int */
                ret = X86_BR_INT;
                break;
        case 0xe8: /* call near rel */
        case 0x9a: /* call far absolute */
                ret = X86_BR_CALL;
                break;
        case 0xe0 ... 0xe3: /* loop jmp */
                ret = X86_BR_JCC;
                break;
        case 0xe9 ... 0xeb: /* jmp */
                ret = X86_BR_JMP;
                break;
        case 0xff: /* call near absolute, call far absolute ind */
                insn_get_modrm(&insn);
                ext = (insn.modrm.bytes[0] >> 3) & 0x7;
                switch (ext) {
                case 2: /* near ind call */
                case 3: /* far ind call */
                        ret = X86_BR_IND_CALL;
                        break;
                case 4:
                case 5:
                        ret = X86_BR_JMP;
                        break;
                }
                break;
        default:
                ret = X86_BR_NONE;
        }
        /*
         * interrupts, traps, faults (and thus ring transitions) may
         * occur on any instruction. Thus, to classify them correctly,
         * we need to first look at the from and to priv levels. If they
         * are different and to is in the kernel, then it indicates
         * a ring transition. If the from instruction is not a ring
         * transition instr (syscall, sysenter, int), then it means
         * it was an irq, trap or fault.
         *
         * we have no way of detecting kernel to kernel faults.
         */
        if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
            && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
                ret = X86_BR_IRQ;

        /*
         * branch priv level determined by target as
         * is done by HW when LBR_SELECT is implemented
         */
        if (ret != X86_BR_NONE)
                ret |= to_plm;

        return ret;
}
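
/*
 * For example: a conditional branch at a user address targeting a user
 * address decodes to X86_BR_JCC | X86_BR_USER, while a syscall
 * instruction entering the kernel yields X86_BR_SYSCALL | X86_BR_KERNEL,
 * since the priv level is taken from the branch target.
 */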

/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
        u64 from, to;
        int br_sel = cpuc->br_sel;
        int i, j, type;
        bool compress = false;

        /* if sampling all branches, then nothing to filter */
        if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
                return;

        for (i = 0; i < cpuc->lbr_stack.nr; i++) {

                from = cpuc->lbr_entries[i].from;
                to = cpuc->lbr_entries[i].to;

                type = branch_type(from, to, cpuc->lbr_entries[i].abort);
                if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
                        if (cpuc->lbr_entries[i].in_tx)
                                type |= X86_BR_IN_TX;
                        else
                                type |= X86_BR_NO_TX;
                }

                /* if type does not correspond, then discard */
                if (type == X86_BR_NONE || (br_sel & type) != type) {
                        cpuc->lbr_entries[i].from = 0;
                        compress = true;
                }
        }

        if (!compress)
                return;

        /* remove all entries with from=0 */
        for (i = 0; i < cpuc->lbr_stack.nr; ) {
                if (!cpuc->lbr_entries[i].from) {
                        j = i;
                        while (++j < cpuc->lbr_stack.nr)
                                cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
                        cpuc->lbr_stack.nr--;
                        if (!cpuc->lbr_entries[i].from)
                                continue;
                }
                i++;
        }
}
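
/*
 * Filtering example: with br_sel == X86_BR_USER | X86_BR_JCC, a recorded
 * entry classified as X86_BR_JCC | X86_BR_USER satisfies
 * (br_sel & type) == type and is kept, whereas X86_BR_CALL | X86_BR_USER
 * does not and is zeroed, then compressed out of lbr_stack.
 */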

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_SELECT_MAP_SIZE] = {
        [PERF_SAMPLE_BRANCH_ANY_SHIFT]        = LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER_SHIFT]       = LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]     = LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV_SHIFT]         = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_REL_JMP
                                                | LBR_IND_JMP | LBR_FAR,
        /*
         * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
         */
        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
         LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
        /*
         * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
         */
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]   = LBR_IND_CALL | LBR_IND_JMP,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]       = LBR_JCC,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_SELECT_MAP_SIZE] = {
        [PERF_SAMPLE_BRANCH_ANY_SHIFT]        = LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER_SHIFT]       = LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]     = LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV_SHIFT]         = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]   = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_FAR,
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]   = LBR_IND_CALL,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]       = LBR_JCC,
};

static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_SELECT_MAP_SIZE] = {
        [PERF_SAMPLE_BRANCH_ANY_SHIFT]        = LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER_SHIFT]       = LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]     = LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV_SHIFT]         = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]   = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_FAR,
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]   = LBR_IND_CALL,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]       = LBR_JCC,
        [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_RETURN | LBR_CALL_STACK,
};
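
/*
 * Note the hsw map's CALL_STACK entry: besides selecting calls and
 * returns it sets LBR_CALL_STACK (bit 9, outside LBR_SEL_MASK), which
 * enables the call-stack mode of the LBR stack so that it tracks the
 * current call chain instead of a plain branch history.
 */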

/* core */
void __init intel_pmu_lbr_init_core(void)
{
        x86_pmu.lbr_nr   = 4;
        x86_pmu.lbr_tos  = MSR_LBR_TOS;
        x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
        x86_pmu.lbr_to   = MSR_LBR_CORE_TO;

        /*
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
        pr_cont("4-deep LBR, ");
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
        x86_pmu.lbr_nr   = 16;
        x86_pmu.lbr_tos  = MSR_LBR_TOS;
        x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

        /*
         * SW branch filter usage:
         * - workaround LBR_SEL errata (see above)
         * - support syscall, sysret capture.
         *   That requires LBR_FAR but that means far
         *   jmp need to be filtered out
         */
        pr_cont("16-deep LBR, ");
}

/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
        x86_pmu.lbr_nr   = 16;
        x86_pmu.lbr_tos  = MSR_LBR_TOS;
        x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

        /*
         * SW branch filter usage:
         * - support syscall, sysret capture.
         *   That requires LBR_FAR but that means far
         *   jmp need to be filtered out
         */
        pr_cont("16-deep LBR, ");
}

/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
        x86_pmu.lbr_nr   = 16;
        x86_pmu.lbr_tos  = MSR_LBR_TOS;
        x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

        pr_cont("16-deep LBR, ");
}

/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
        /*
         * only models starting at stepping 10 seem
         * to have an operational LBR which can freeze
         * on PMU interrupt
         */
        if (boot_cpu_data.x86_model == 28
            && boot_cpu_data.x86_mask < 10) {
                pr_cont("LBR disabled due to erratum");
                return;
        }

        x86_pmu.lbr_nr   = 8;
        x86_pmu.lbr_tos  = MSR_LBR_TOS;
        x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
        x86_pmu.lbr_to   = MSR_LBR_CORE_TO;

        /*
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
        pr_cont("8-deep LBR, ");
}