// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "../perf_event.h"

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_TIME,
};

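/*
 * For each LBR format, lbr_desc records which extra flag bits (mispredict,
 * TSX) are encoded in the top bits of the LBR_FROM MSR, so the read path
 * knows what to strip before sign-extending the address.
 */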
static const enum {
	LBR_EIP_FLAGS		= 1,
	LBR_TSX			= 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};

/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
#define LBR_USER_BIT		1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT		2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
#define LBR_RETURN_BIT		5 /* do not capture near returns */
#define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
#define LBR_FAR_BIT		8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT	9 /* enable call stack */

/*
 * Following bit only exists in Linux; we mask it out before writing it to
 * the actual MSR. But it helps the constraint perf code to understand
 * that this is a separate configuration.
 */
#define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */

#define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
#define LBR_USER	(1 << LBR_USER_BIT)
#define LBR_JCC		(1 << LBR_JCC_BIT)
#define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
#define LBR_RETURN	(1 << LBR_RETURN_BIT)
#define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
#define LBR_FAR		(1 << LBR_FAR_BIT)
#define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
#define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP	-1	/* LBR filter not supported */
#define LBR_IGN		0	/* ignored */

#define LBR_ANY		 \
	(LBR_JCC	|\
	 LBR_REL_CALL	|\
	 LBR_IND_CALL	|\
	 LBR_RETURN	|\
	 LBR_REL_JMP	|\
	 LBR_IND_JMP	|\
	 LBR_FAR)

#define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
#define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
#define LBR_FROM_FLAG_ABORT	BIT_ULL(61)

#define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))

/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
	X86_BR_NONE		= 0,      /* unknown */

	X86_BR_USER		= 1 << 0, /* branch target is user */
	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */

	X86_BR_CALL		= 1 << 2, /* call */
	X86_BR_RET		= 1 << 3, /* return */
	X86_BR_SYSCALL		= 1 << 4, /* syscall */
	X86_BR_SYSRET		= 1 << 5, /* syscall return */
	X86_BR_INT		= 1 << 6, /* sw interrupt */
	X86_BR_IRET		= 1 << 7, /* return from interrupt */
	X86_BR_JCC		= 1 << 8, /* conditional */
	X86_BR_JMP		= 1 << 9, /* jump */
	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
	X86_BR_ABORT		= 1 << 12,/* transaction abort */
	X86_BR_IN_TX		= 1 << 13,/* in transaction */
	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */

	X86_BR_TYPE_SAVE	= 1 << 18,/* indicate to save branch type */

};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY		 \
	(X86_BR_CALL		|\
	 X86_BR_RET		|\
	 X86_BR_SYSCALL		|\
	 X86_BR_SYSRET		|\
	 X86_BR_INT		|\
	 X86_BR_IRET		|\
	 X86_BR_JCC		|\
	 X86_BR_JMP		|\
	 X86_BR_IRQ		|\
	 X86_BR_ABORT		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_IND_JMP		|\
	 X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL		 \
	(X86_BR_CALL		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_ZERO_CALL	|\
	 X86_BR_SYSCALL		|\
	 X86_BR_IRQ		|\
	 X86_BR_INT)

static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
 * otherwise it becomes near impossible to get a reliable stack.
 */

static void __intel_pmu_lbr_enable(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 debugctl, lbr_select = 0, orig_debugctl;

	/*
	 * No need to unfreeze manually, as v4 can do that as part
	 * of the GLOBAL_STATUS ack.
	 */
	if (pmi && x86_pmu.version >= 4)
		return;

	/*
	 * No need to reprogram LBR_SELECT in a PMI, as it
	 * did not change.
	 */
	if (cpuc->lbr_sel)
		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
	if (!pmi && cpuc->lbr_sel)
		wrmsrl(MSR_LBR_SELECT, lbr_select);

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	orig_debugctl = debugctl;
	debugctl |= DEBUGCTLMSR_LBR;
	/*
	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
	 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
	 * may cause superfluous increase/decrease of LBR_TOS.
	 */
	if (!(lbr_select & LBR_CALL_STACK))
		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
	if (orig_debugctl != debugctl)
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to   + i, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + i, 0);
	}
}

void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);
	return tos;
}

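/*
 * States of the per-task saved LBR stack (task_ctx->lbr_stack_state):
 * LBR_VALID means __intel_pmu_lbr_save() stashed a stack that can be
 * restored on the next sched-in; LBR_NONE forces a reset instead.
 */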
enum {
	LBR_NONE,
	LBR_VALID,
};

/*
 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
 * TSX is not supported they have no consistent behavior:
 *
 * - For wrmsr(), bits 61:62 are considered part of the sign extension.
 * - For HW updates (branch captures) bits 61:62 are always OFF and are not
 *   part of the sign extension.
 *
 * Therefore, if:
 *
 * 1) LBR has TSX format
 * 2) CPU has no TSX support enabled
 *
 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
 * value from rdmsr() must be converted to have a 61 bits sign extension,
 * ignoring the TSX flags.
 */
static inline bool lbr_from_signext_quirk_needed(void)
{
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
			   boot_cpu_has(X86_FEATURE_RTM);

	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
}

DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);

/* If quirk is enabled, ensure sign extension is 63 bits: */
inline u64 lbr_from_signext_quirk_wr(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Sign extend into bits 61:62 while preserving bit 63.
		 *
		 * Quirk is enabled when TSX is disabled. Therefore TSX
		 * bits in val are always OFF and must be changed to be
		 * sign extension bits. Since bits 59:60 are guaranteed
		 * to be part of the sign extension bits, we can just
		 * copy them to 61:62.
		 */
		val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
	}
	return val;
}
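
/*
 * Illustration: a sign-extended kernel address read back with the quirk
 * active has bits 61:62 forced clear (they were the TSX flags), but bits
 * 59:60 still carry the sign.  (val & LBR_FROM_SIGNEXT_2MSB) << 2 copies
 * those two bits into 61:62, so the value written back is sign-extended
 * through bit 62, with bit 63 (MISPRED) left untouched.
 */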

/*
 * If quirk is needed, ensure sign extension is 61 bits:
 */
static u64 lbr_from_signext_quirk_rd(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Quirk is on when TSX is not enabled. Therefore TSX
		 * flags must be read as OFF.
		 */
		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
	}
	return val;
}

static inline void wrlbr_from(unsigned int idx, u64 val)
{
	val = lbr_from_signext_quirk_wr(val);
	wrmsrl(x86_pmu.lbr_from + idx, val);
}

static inline void wrlbr_to(unsigned int idx, u64 val)
{
	wrmsrl(x86_pmu.lbr_to + idx, val);
}

static inline u64 rdlbr_from(unsigned int idx)
{
	u64 val;

	rdmsrl(x86_pmu.lbr_from + idx, val);

	return lbr_from_signext_quirk_rd(val);
}

static inline u64 rdlbr_to(unsigned int idx)
{
	u64 val;

	rdmsrl(x86_pmu.lbr_to + idx, val);

	return val;
}

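/*
 * The LBR MSRs form a ring buffer indexed by TOS.  Save and restore walk
 * it from the most recent entry backwards, so lbr_from[0] in the saved
 * context always holds the newest branch; masking the index with
 * (lbr_nr - 1) wraps it (lbr_nr is a power of two on all supported CPUs:
 * 4, 8, 16 or 32).
 */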
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0 ||
	    task_ctx->lbr_stack_state == LBR_NONE) {
		intel_pmu_lbr_reset();
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = task_ctx->tos;
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
		wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);

		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	wrmsrl(x86_pmu.lbr_tos, tos);
	task_ctx->lbr_stack_state = LBR_NONE;
}

static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
	unsigned lbr_idx, mask;
	u64 tos;
	int i;

	if (task_ctx->lbr_callstack_users == 0) {
		task_ctx->lbr_stack_state = LBR_NONE;
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	task_ctx->tos = tos;
	task_ctx->lbr_stack_state = LBR_VALID;
}

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!cpuc->lbr_users)
		return;

	/*
	 * If LBR callstack feature is enabled and the stack was saved when
	 * the task was scheduled out, restore the stack. Otherwise flush
	 * the LBR stack.
	 */
	task_ctx = ctx ? ctx->task_ctx_data : NULL;
	if (task_ctx) {
		if (sched_in)
			__intel_pmu_lbr_restore(task_ctx);
		else
			__intel_pmu_lbr_save(task_ctx);
		return;
	}

	/*
	 * Since a context switch can flip the address space and LBR entries
	 * are not tagged with an identifier, we need to wipe the LBR, even for
	 * per-cpu events. You simply cannot resolve the branches from the old
	 * address space.
	 */
	if (sched_in)
		intel_pmu_lbr_reset();
}

static inline bool branch_user_callstack(unsigned br_sel)
{
	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}

void intel_pmu_lbr_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->br_sel = event->hw.branch_reg.reg;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users++;
	}

	/*
	 * Request pmu::sched_task() callback, which will fire inside the
	 * regular perf event scheduling, so that call will:
	 *
	 *  - restore or wipe; when LBR-callstack,
	 *  - wipe; otherwise,
	 *
	 * when this is from __perf_event_task_sched_in().
	 *
	 * However, if this is from perf_install_in_context(), no such callback
	 * will follow and we'll need to reset the LBR here if this is the
	 * first LBR event.
	 *
	 * The problem is, we cannot tell these cases apart... but we can
	 * exclude the biggest chunk of cases by looking at
	 * event->total_time_running. An event that has accrued runtime cannot
	 * be 'new'. Conversely, a new event can get installed through the
	 * context switch path for the first time.
	 */
	perf_sched_cb_inc(event->ctx->pmu);
	if (!cpuc->lbr_users++ && !event->total_time_running)
		intel_pmu_lbr_reset();
}

void intel_pmu_lbr_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	if (branch_user_callstack(cpuc->br_sel) &&
	    event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users--;
	}

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->ctx->pmu);
}

void intel_pmu_lbr_enable_all(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable(pmi);
}

void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
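		/*
		 * In the 32-bit format one MSR packs both addresses:
		 * the low half is the from address, the high half the to.
		 */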
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64     lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
		cpuc->lbr_entries[i].mispred	= 0;
		cpuc->lbr_entries[i].predicted	= 0;
		cpuc->lbr_entries[i].in_tx	= 0;
		cpuc->lbr_entries[i].abort	= 0;
		cpuc->lbr_entries[i].cycles	= 0;
		cpuc->lbr_entries[i].type	= 0;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
}

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	bool need_info = false;
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;
	int out = 0;
	int num = x86_pmu.lbr_nr;

	if (cpuc->lbr_sel) {
		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
			num = tos;
	}

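	/*
	 * In call-stack mode the LBR does not roll over: TOS counts the
	 * live depth, so only the first 'tos' entries are valid.
	 */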
	for (i = 0; i < num; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
		int skip = 0;
		u16 cycles = 0;
		int lbr_flags = lbr_desc[lbr_format];

		from = rdlbr_from(lbr_idx);
		to   = rdlbr_to(lbr_idx);

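		/*
		 * LBR_FORMAT_INFO moved the flags out of the from address
		 * into a dedicated MSR that also carries a cycle count.
		 */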
		if (lbr_format == LBR_FORMAT_INFO && need_info) {
			u64 info;

			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
			mis = !!(info & LBR_INFO_MISPRED);
			pred = !mis;
			in_tx = !!(info & LBR_INFO_IN_TX);
			abort = !!(info & LBR_INFO_ABORT);
			cycles = (info & LBR_INFO_CYCLES);
		}

		if (lbr_format == LBR_FORMAT_TIME) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
			cycles = ((to >> 48) & LBR_INFO_CYCLES);

			to = (u64)((((s64)to) << 16) >> 16);
		}

		if (lbr_flags & LBR_EIP_FLAGS) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
		}
		if (lbr_flags & LBR_TSX) {
			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
			abort = !!(from & LBR_FROM_FLAG_ABORT);
			skip = 3;
		}
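		/*
		 * Strip the flag bits: shift them out, then arithmetic-
		 * shift back (via s64) to sign-extend the address into a
		 * canonical 64-bit value.
		 */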
		from = (u64)((((s64)from) << skip) >> skip);

		/*
		 * Some CPUs report duplicated abort records,
		 * with the second entry not having an abort bit set.
		 * Skip them here. This loop runs backwards,
		 * so we need to undo the previous record.
		 * If the abort just happened outside the window
		 * the extra entry cannot be removed.
		 */
		if (abort && x86_pmu.lbr_double_abort && out > 0)
			out--;

		cpuc->lbr_entries[out].from	 = from;
		cpuc->lbr_entries[out].to	 = to;
		cpuc->lbr_entries[out].mispred	 = mis;
		cpuc->lbr_entries[out].predicted = pred;
		cpuc->lbr_entries[out].in_tx	 = in_tx;
		cpuc->lbr_entries[out].abort	 = abort;
		cpuc->lbr_entries[out].cycles	 = cycles;
		cpuc->lbr_entries[out].type	 = 0;
		cpuc->lbr_entries[out].reserved	 = 0;
		out++;
	}
	cpuc->lbr_stack.nr = out;
}

void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);

	intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
	u64 br_type = event->attr.branch_sample_type;
	int mask = 0;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;

	/* we ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
		mask |= X86_BR_ABORT;

	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
		mask |= X86_BR_IN_TX;

	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
		mask |= X86_BR_NO_TX;

	if (br_type & PERF_SAMPLE_BRANCH_COND)
		mask |= X86_BR_JCC;

	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
		if (!x86_pmu_has_lbr_callstack())
			return -EOPNOTSUPP;
		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
			return -EINVAL;
		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
			X86_BR_CALL_STACK;
	}

	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
		mask |= X86_BR_IND_JMP;

	if (br_type & PERF_SAMPLE_BRANCH_CALL)
		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
		mask |= X86_BR_TYPE_SAVE;

	/*
	 * stash the actual user request into reg; it may
	 * be used by fixup code for some CPUs
	 */
	event->hw.branch_reg.reg = mask;
	return 0;
}

/*
 * setup the HW LBR filter
 * Used only when available, may not be enough to disambiguate
 * all branches, may need the help of the SW filter
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, v;
	int i;

	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		if (!(br_type & (1ULL << i)))
			continue;

		v = x86_pmu.lbr_sel_map[i];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGN)
			mask |= v;
	}

	reg = &event->hw.branch_reg;
	reg->idx = EXTRA_REG_LBR;

	/*
	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
	 * in suppress mode. So LBR_SELECT should be set to
	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
	 * But the 10th bit LBR_CALL_STACK does not operate
	 * in suppress mode.
	 */
	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
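	/*
	 * Illustration of the XOR above: a request for USER|ANY_RETURN
	 * maps (on HSW) to mask = LBR_USER | LBR_RETURN | LBR_FAR;
	 * XOR-ing with the suppress-mode bits clears the "do not
	 * capture" bits for what was requested and sets them for
	 * everything else.
	 */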

	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
		reg->config |= LBR_NO_INFO;

	return 0;
}

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
	int ret = 0;

	/*
	 * no LBR on this PMU
	 */
	if (!x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	/*
	 * setup SW LBR filter
	 */
	ret = intel_pmu_setup_sw_lbr_filter(event);
	if (ret)
		return ret;

	/*
	 * setup HW LBR filter, if any
	 */
	if (x86_pmu.lbr_sel_map)
		ret = intel_pmu_setup_hw_lbr_filter(event);

	return ret;
}

/*
 * return the type of control flow change at address "from";
 * the instruction is not necessarily a branch (e.g. in case of an interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
	struct insn insn;
	void *addr;
	int bytes_read, bytes_left;
	int ret = X86_BR_NONE;
	int ext, to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * maybe zero if lbr did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (abort)
		return X86_BR_ABORT | to_plm;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and we interrupt in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
						MAX_INSN_SIZE);
		bytes_read = MAX_INSN_SIZE - bytes_left;
		if (!bytes_read)
			return X86_BR_NONE;

		addr = buf;
	} else {
		/*
		 * The LBR logs any address in the IP, even if the IP just
		 * faulted. This means userspace can control the from address.
		 * Ensure we don't blindly read any address by validating it is
		 * a known text address.
		 */
		if (kernel_text_address(from)) {
			addr = (void *)from;
			/*
			 * Assume we can get the maximum possible size
			 * when grabbing kernel data. This is not
			 * _strictly_ true since we could possibly be
			 * executing up next to a memory hole, but
			 * it is very unlikely to be a problem.
			 */
			bytes_read = MAX_INSN_SIZE;
		} else {
			return X86_BR_NONE;
		}
	}

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
	insn_init(&insn, addr, bytes_read, is64);
	insn_get_opcode(&insn);
	if (!insn.opcode.got)
		return X86_BR_ABORT;

	switch (insn.opcode.bytes[0]) {
	case 0xf:
		switch (insn.opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			ret = X86_BR_SYSCALL;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			ret = X86_BR_SYSRET;
			break;
		case 0x80 ... 0x8f: /* conditional */
			ret = X86_BR_JCC;
			break;
		default:
			ret = X86_BR_NONE;
		}
		break;
	case 0x70 ... 0x7f: /* conditional */
		ret = X86_BR_JCC;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		ret = X86_BR_RET;
		break;
	case 0xcf: /* iret */
		ret = X86_BR_IRET;
		break;
	case 0xcc ... 0xce: /* int */
		ret = X86_BR_INT;
		break;
	case 0xe8: /* call near rel */
		insn_get_immediate(&insn);
		if (insn.immediate1.value == 0) {
			/* zero length call */
			ret = X86_BR_ZERO_CALL;
			break;
		}
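		/*
		 * A call with zero displacement only pushes the return
		 * address (commonly used to read the IP in 32-bit PIC
		 * code), so it is classified separately above; otherwise
		 * fall through to the ordinary call handling below.
		 */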
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
	case 0xe0 ... 0xe3: /* loop jmp */
		ret = X86_BR_JCC;
		break;
	case 0xe9 ... 0xeb: /* jmp */
		ret = X86_BR_JMP;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		insn_get_modrm(&insn);
		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			ret = X86_BR_IND_CALL;
			break;
		case 4:
		case 5:
			ret = X86_BR_IND_JMP;
			break;
		}
		break;
	default:
		ret = X86_BR_NONE;
	}
	/*
	 * interrupts, traps, faults (and thus ring transitions) may
	 * occur on any instruction. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * we have no way of detecting kernel to kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}
942
d5c7f9dc
JY
943#define X86_BR_TYPE_MAP_MAX 16
944
945static int branch_map[X86_BR_TYPE_MAP_MAX] = {
946 PERF_BR_CALL, /* X86_BR_CALL */
947 PERF_BR_RET, /* X86_BR_RET */
948 PERF_BR_SYSCALL, /* X86_BR_SYSCALL */
949 PERF_BR_SYSRET, /* X86_BR_SYSRET */
950 PERF_BR_UNKNOWN, /* X86_BR_INT */
951 PERF_BR_UNKNOWN, /* X86_BR_IRET */
952 PERF_BR_COND, /* X86_BR_JCC */
953 PERF_BR_UNCOND, /* X86_BR_JMP */
954 PERF_BR_UNKNOWN, /* X86_BR_IRQ */
955 PERF_BR_IND_CALL, /* X86_BR_IND_CALL */
956 PERF_BR_UNKNOWN, /* X86_BR_ABORT */
957 PERF_BR_UNKNOWN, /* X86_BR_IN_TX */
958 PERF_BR_UNKNOWN, /* X86_BR_NO_TX */
959 PERF_BR_CALL, /* X86_BR_ZERO_CALL */
960 PERF_BR_UNKNOWN, /* X86_BR_CALL_STACK */
961 PERF_BR_IND, /* X86_BR_IND_JMP */
962};
963
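/*
 * e.g. X86_BR_JCC = 1 << 8: common_branch_type() drops the two
 * priv-level bits, __ffs() then yields index 6, and branch_map[6]
 * above is PERF_BR_COND.
 */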
static int
common_branch_type(int type)
{
	int i;

	type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */

	if (type) {
		i = __ffs(type);
		if (i < X86_BR_TYPE_MAP_MAX)
			return branch_map[i];
	}

	return PERF_BR_UNKNOWN;
}

/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
	u64 from, to;
	int br_sel = cpuc->br_sel;
	int i, j, type;
	bool compress = false;

	/* if sampling all branches, then nothing to filter */
	if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
	    ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
		return;

	for (i = 0; i < cpuc->lbr_stack.nr; i++) {

		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;

		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
			if (cpuc->lbr_entries[i].in_tx)
				type |= X86_BR_IN_TX;
			else
				type |= X86_BR_NO_TX;
		}

		/* if type does not correspond, then discard */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;
			compress = true;
		}

		if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
			cpuc->lbr_entries[i].type = common_branch_type(type);
	}

	if (!compress)
		return;

	/* remove all entries with from=0 */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
						| LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
	 */
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
	 */
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL | LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_RETURN | LBR_CALL_STACK,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

/* core */
void __init intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr     = 4;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr     = 16;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - workaround LBR_SEL errata (see above)
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR, but that means far
	 *   jmps need to be filtered out
	 */
}

/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR, but that means far
	 *   jmps need to be filtered out
	 */
}

/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	if (lbr_from_signext_quirk_needed())
		static_branch_enable(&lbr_from_quirk_key);
}

/* skylake */
__init void intel_pmu_lbr_init_skl(void)
{
	x86_pmu.lbr_nr	 = 32;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR, but that means far
	 *   jmps need to be filtered out
	 */
}

/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
	/*
	 * only models starting at stepping 10 seem
	 * to have an operational LBR which can freeze
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_model == 28
	    && boot_cpu_data.x86_mask < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}

	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}

/* slm */
void __init intel_pmu_lbr_init_slm(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}

/* Knights Landing */
void intel_pmu_lbr_init_knl(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
}