Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
de0428a7 KW |
2 | #include <linux/bitops.h> |
3 | #include <linux/types.h> | |
4 | #include <linux/slab.h> | |
89e97eb8 | 5 | #include <linux/sched/clock.h> |
ca037701 | 6 | |
c1961a46 | 7 | #include <asm/cpu_entry_area.h> |
154fcf3a | 8 | #include <asm/debugreg.h> |
de0428a7 | 9 | #include <asm/perf_event.h> |
42f3bdc5 | 10 | #include <asm/tlbflush.h> |
3e702ff6 | 11 | #include <asm/insn.h> |
59e9f587 | 12 | #include <asm/io.h> |
efef7f18 | 13 | #include <asm/msr.h> |
89e97eb8 | 14 | #include <asm/timer.h> |
de0428a7 | 15 | |
27f6d22b | 16 | #include "../perf_event.h" |
ca037701 | 17 | |
10043e02 TG |
18 | /* Waste a full page so it can be mapped into the cpu_entry_area */ |
19 | DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); | |
20 | ||
ca037701 PZ |
21 | /* The size of a BTS record in bytes: */ |
22 | #define BTS_RECORD_SIZE 24 | |
23 | ||
9536c8d2 | 24 | #define PEBS_FIXUP_SIZE PAGE_SIZE |
ca037701 PZ |
25 | |
26 | /* | |
27 | * pebs_record_32 for p4 and core not supported | |
28 | ||
29 | struct pebs_record_32 { | |
30 | u32 flags, ip; | |
31 | u32 ax, bx, cx, dx; | |
32 | u32 si, di, bp, sp; | |
33 | }; | |
34 | ||
35 | */ | |
36 | ||
f20093ee SE |
37 | union intel_x86_pebs_dse { |
38 | u64 val; | |
39 | struct { | |
40 | unsigned int ld_dse:4; | |
41 | unsigned int ld_stlb_miss:1; | |
42 | unsigned int ld_locked:1; | |
61b985e3 KL |
43 | unsigned int ld_data_blk:1; |
44 | unsigned int ld_addr_blk:1; | |
45 | unsigned int ld_reserved:24; | |
f20093ee SE |
46 | }; |
47 | struct { | |
48 | unsigned int st_l1d_hit:1; | |
49 | unsigned int st_reserved1:3; | |
50 | unsigned int st_stlb_miss:1; | |
51 | unsigned int st_locked:1; | |
52 | unsigned int st_reserved2:26; | |
53 | }; | |
61b985e3 KL |
54 | struct { |
55 | unsigned int st_lat_dse:4; | |
56 | unsigned int st_lat_stlb_miss:1; | |
57 | unsigned int st_lat_locked:1; | |
58 | unsigned int ld_reserved3:26; | |
59 | }; | |
38aaf921 KL |
60 | struct { |
61 | unsigned int mtl_dse:5; | |
62 | unsigned int mtl_locked:1; | |
63 | unsigned int mtl_stlb_miss:1; | |
64 | unsigned int mtl_fwd_blk:1; | |
65 | unsigned int ld_reserved4:24; | |
66 | }; | |
608f6976 KL |
67 | struct { |
68 | unsigned int lnc_dse:8; | |
69 | unsigned int ld_reserved5:2; | |
70 | unsigned int lnc_stlb_miss:1; | |
71 | unsigned int lnc_locked:1; | |
72 | unsigned int lnc_data_blk:1; | |
73 | unsigned int lnc_addr_blk:1; | |
74 | unsigned int ld_reserved6:18; | |
75 | }; | |
f20093ee SE |
76 | }; |
77 | ||
78 | ||
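The union above overlays several microarchitecture-specific bit-field views on the same raw 64-bit PEBS data-source (DSE) word. As a quick illustration of how the generic load view decodes, here is a minimal standalone sketch; the union is re-declared locally purely for the example, it relies on the usual x86 LSB-first bit-field layout, and the sample value is made up:

```c
#include <stdint.h>
#include <stdio.h>

/* Local re-declaration of the generic load view, for illustration only. */
union pebs_dse_example {
	uint64_t val;
	struct {
		unsigned int ld_dse:4;		/* index into pebs_data_source[] */
		unsigned int ld_stlb_miss:1;	/* missed the 2nd-level TLB      */
		unsigned int ld_locked:1;	/* locked access                 */
		unsigned int ld_data_blk:1;	/* data forwarding block         */
		unsigned int ld_addr_blk:1;	/* address conflict block        */
		unsigned int ld_reserved:24;
	};
};

int main(void)
{
	/* 0x23 = 0b100011: dse = 3, stlb_miss = 0, locked = 1 */
	union pebs_dse_example dse = { .val = 0x23 };

	printf("dse=%u stlb_miss=%u locked=%u\n",
	       (unsigned)dse.ld_dse, (unsigned)dse.ld_stlb_miss,
	       (unsigned)dse.ld_locked);
	return 0;
}
```

Built with gcc on x86 this should print `dse=3 stlb_miss=0 locked=1`, i.e. the same fields load_latency_data() below pulls out of the record.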
79 | /* | |
80 | * Map PEBS Load Latency Data Source encodings to generic | |
81 | * memory data source information | |
82 | */ | |
83 | #define P(a, b) PERF_MEM_S(a, b) | |
84 | #define OP_LH (P(OP, LOAD) | P(LVL, HIT)) | |
6ae5fa61 AK |
85 | #define LEVEL(x) P(LVLNUM, x) |
86 | #define REM P(REMOTE, REMOTE) | |
f20093ee SE |
87 | #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS)) |
88 | ||
e17dc653 | 89 | /* Version for Sandy Bridge and later */ |
608f6976 | 90 | static u64 pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX] = { |
6ae5fa61 AK |
91 | P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),/* 0x00:ukn L3 */ |
92 | OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x01: L1 local */ | |
93 | OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */ | |
94 | OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* 0x03: L2 hit */ | |
95 | OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* 0x04: L3 hit */ | |
96 | OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, MISS), /* 0x05: L3 hit, snoop miss */ | |
97 | OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* 0x06: L3 hit, snoop hit */ | |
98 | OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x07: L3 hit, snoop hitm */ | |
99 | OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x08: L3 miss snoop hit */ | |
100 | OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/ | |
101 | OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, HIT), /* 0x0a: L3 miss, shared */ | |
102 | OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x0b: L3 miss, shared */ | |
103 | OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | SNOOP_NONE_MISS, /* 0x0c: L3 miss, excl */ | |
104 | OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */ | |
105 | OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */ | |
106 | OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */ | |
f20093ee SE |
107 | }; |
108 | ||
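Each table entry above is composed with the PERF_MEM_S() helper from the perf UAPI, which shifts a field value into its slot in union perf_mem_data_src; consumers decode it through the union's named bit-fields. A hedged userspace sketch of building and decoding the 0x01 ("L1 local") entry, assuming only that the uapi header `<linux/perf_event.h>` is available:

```c
#include <linux/perf_event.h>
#include <stdio.h>

int main(void)
{
	/* Same composition as the 0x01 table entry above:
	 * OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE)      */
	union perf_mem_data_src src = {
		.val = PERF_MEM_S(OP, LOAD)  | PERF_MEM_S(LVL, HIT)   |
		       PERF_MEM_S(LVL, L1)   | PERF_MEM_S(LVLNUM, L1) |
		       PERF_MEM_S(SNOOP, NONE),
	};

	printf("op=%u lvl=0x%x lvl_num=%u snoop=0x%x\n",
	       (unsigned)src.mem_op, (unsigned)src.mem_lvl,
	       (unsigned)src.mem_lvl_num, (unsigned)src.mem_snoop);
	return 0;
}
```

This is the same encoding perf report later turns back into a human-readable "L1 hit" data source.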
e17dc653 AK |
109 | /* Patch up minor differences in the bits */ |
110 | void __init intel_pmu_pebs_data_source_nhm(void) | |
111 | { | |
6ae5fa61 AK |
112 | pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT); |
113 | pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); | |
114 | pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); | |
115 | } | |
116 | ||
ccf170e9 | 117 | static void __init __intel_pmu_pebs_data_source_skl(bool pmem, u64 *data_source) |
6ae5fa61 AK |
118 | { |
119 | u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4); | |
120 | ||
ccf170e9 KL |
121 | data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT); |
122 | data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT); | |
123 | data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE); | |
124 | data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD); | |
125 | data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM); | |
126 | } | |
127 | ||
128 | void __init intel_pmu_pebs_data_source_skl(bool pmem) | |
129 | { | |
130 | __intel_pmu_pebs_data_source_skl(pmem, pebs_data_source); | |
131 | } | |
132 | ||
24919fde | 133 | static void __init __intel_pmu_pebs_data_source_grt(u64 *data_source) |
ccf170e9 KL |
134 | { |
135 | data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT); | |
136 | data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); | |
137 | data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD); | |
138 | } | |
139 | ||
24919fde KL |
140 | void __init intel_pmu_pebs_data_source_grt(void) |
141 | { | |
142 | __intel_pmu_pebs_data_source_grt(pebs_data_source); | |
143 | } | |
144 | ||
ccf170e9 KL |
145 | void __init intel_pmu_pebs_data_source_adl(void) |
146 | { | |
147 | u64 *data_source; | |
148 | ||
149 | data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source; | |
150 | memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); | |
151 | __intel_pmu_pebs_data_source_skl(false, data_source); | |
152 | ||
153 | data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source; | |
154 | memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); | |
24919fde | 155 | __intel_pmu_pebs_data_source_grt(data_source); |
e17dc653 AK |
156 | } |
157 | ||
a430021f | 158 | static void __init __intel_pmu_pebs_data_source_cmt(u64 *data_source) |
38aaf921 KL |
159 | { |
160 | data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD); | |
161 | data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); | |
162 | data_source[0x0a] = OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, NONE); | |
163 | data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE); | |
164 | data_source[0x0c] = OP_LH | LEVEL(RAM) | REM | P(SNOOPX, FWD); | |
165 | data_source[0x0d] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, HITM); | |
166 | } | |
167 | ||
168 | void __init intel_pmu_pebs_data_source_mtl(void) | |
169 | { | |
170 | u64 *data_source; | |
171 | ||
172 | data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source; | |
173 | memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); | |
174 | __intel_pmu_pebs_data_source_skl(false, data_source); | |
175 | ||
176 | data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source; | |
177 | memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); | |
a430021f KL |
178 | __intel_pmu_pebs_data_source_cmt(data_source); |
179 | } | |
180 | ||
d3fe6f0a DM |
181 | void __init intel_pmu_pebs_data_source_arl_h(void) |
182 | { | |
183 | u64 *data_source; | |
184 | ||
185 | intel_pmu_pebs_data_source_lnl(); | |
186 | ||
187 | data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX].pebs_data_source; | |
188 | memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); | |
189 | __intel_pmu_pebs_data_source_cmt(data_source); | |
190 | } | |
191 | ||
a430021f KL |
192 | void __init intel_pmu_pebs_data_source_cmt(void) |
193 | { | |
194 | __intel_pmu_pebs_data_source_cmt(pebs_data_source); | |
38aaf921 KL |
195 | } |
196 | ||
608f6976 KL |
197 | /* Version for Lion Cove and later */ |
198 | static u64 lnc_pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX] = { | |
199 | P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA), /* 0x00: ukn L3 */ | |
200 | OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x01: L1 hit */ | |
201 | OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x02: L1 hit */ | |
202 | OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x03: LFB/L1 Miss Handling Buffer hit */ | |
203 | 0, /* 0x04: Reserved */ | |
204 | OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* 0x05: L2 Hit */ | |
205 | OP_LH | LEVEL(L2_MHB) | P(SNOOP, NONE), /* 0x06: L2 Miss Handling Buffer Hit */ | |
206 | 0, /* 0x07: Reserved */ | |
207 | OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* 0x08: L3 Hit */ | |
208 | 0, /* 0x09: Reserved */ | |
209 | 0, /* 0x0a: Reserved */ | |
210 | 0, /* 0x0b: Reserved */ | |
211 | OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD), /* 0x0c: L3 Hit Snoop Fwd */ | |
212 | OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x0d: L3 Hit Snoop HitM */ | |
213 | 0, /* 0x0e: Reserved */ | |
214 | P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x0f: L3 Miss Snoop HitM */ | |
215 | OP_LH | LEVEL(MSC) | P(SNOOP, NONE), /* 0x10: Memory-side Cache Hit */ | |
216 | OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, NONE), /* 0x11: Local Memory Hit */ | |
217 | }; | |
218 | ||
219 | void __init intel_pmu_pebs_data_source_lnl(void) | |
220 | { | |
221 | u64 *data_source; | |
222 | ||
223 | data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source; | |
224 | memcpy(data_source, lnc_pebs_data_source, sizeof(lnc_pebs_data_source)); | |
225 | ||
226 | data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source; | |
227 | memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); | |
228 | __intel_pmu_pebs_data_source_cmt(data_source); | |
229 | } | |
230 | ||
9ad64c0f SE |
231 | static u64 precise_store_data(u64 status) |
232 | { | |
233 | union intel_x86_pebs_dse dse; | |
234 | u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2); | |
235 | ||
236 | dse.val = status; | |
237 | ||
238 | /* | |
239 | * bit 4: TLB access | |
240 | * 1 = store missed the 2nd level TLB | |
241 | * | |
242 | * so the translation was serviced by the page walker or the OS; | |
243 | * otherwise the store hit the 2nd level TLB | |
244 | */ | |
245 | if (dse.st_stlb_miss) | |
246 | val |= P(TLB, MISS); | |
247 | else | |
248 | val |= P(TLB, HIT); | |
249 | ||
250 | /* | |
251 | * bit 0: hit L1 data cache | |
252 | * if not set, then all we know is that | |
253 | * it missed L1D | |
254 | */ | |
255 | if (dse.st_l1d_hit) | |
256 | val |= P(LVL, HIT); | |
257 | else | |
258 | val |= P(LVL, MISS); | |
259 | ||
260 | /* | |
261 | * bit 5: Locked prefix | |
262 | */ | |
263 | if (dse.st_locked) | |
264 | val |= P(LOCK, LOCKED); | |
265 | ||
266 | return val; | |
267 | } | |
268 | ||
c8aab2e0 | 269 | static u64 precise_datala_hsw(struct perf_event *event, u64 status) |
f9134f36 AK |
270 | { |
271 | union perf_mem_data_src dse; | |
272 | ||
770eee1f SE |
273 | dse.val = PERF_MEM_NA; |
274 | ||
275 | if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) | |
276 | dse.mem_op = PERF_MEM_OP_STORE; | |
277 | else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW) | |
278 | dse.mem_op = PERF_MEM_OP_LOAD; | |
722e76e6 SE |
279 | |
280 | /* | |
281 | * L1 info is only valid for the following events: | |
282 | * | |
283 | * MEM_UOPS_RETIRED.STLB_MISS_STORES | |
284 | * MEM_UOPS_RETIRED.LOCK_STORES | |
285 | * MEM_UOPS_RETIRED.SPLIT_STORES | |
286 | * MEM_UOPS_RETIRED.ALL_STORES | |
287 | */ | |
c8aab2e0 SE |
288 | if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) { |
289 | if (status & 1) | |
290 | dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT; | |
291 | else | |
292 | dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS; | |
293 | } | |
f9134f36 AK |
294 | return dse.val; |
295 | } | |
296 | ||
39a41278 KL |
297 | static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock) |
298 | { | |
299 | /* | |
300 | * TLB access | |
301 | * 0 = did not miss 2nd level TLB | |
302 | * 1 = missed 2nd level TLB | |
303 | */ | |
304 | if (tlb) | |
305 | *val |= P(TLB, MISS) | P(TLB, L2); | |
306 | else | |
307 | *val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2); | |
308 | ||
309 | /* locked prefix */ | |
310 | if (lock) | |
311 | *val |= P(LOCK, LOCKED); | |
312 | } | |
313 | ||
314 | /* Retrieve the latency data for the e-core of ADL */ |
09026243 KL |
315 | static u64 __grt_latency_data(struct perf_event *event, u64 status, |
316 | u8 dse, bool tlb, bool lock, bool blk) | |
39a41278 | 317 | { |
39a41278 KL |
318 | u64 val; |
319 | ||
b0560bfd | 320 | WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); |
39a41278 | 321 | |
608f6976 | 322 | dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK; |
38aaf921 | 323 | val = hybrid_var(event->pmu, pebs_data_source)[dse]; |
39a41278 | 324 | |
38aaf921 | 325 | pebs_set_tlb_lock(&val, tlb, lock); |
39a41278 | 326 | |
38aaf921 | 327 | if (blk) |
39a41278 KL |
328 | val |= P(BLK, DATA); |
329 | else | |
330 | val |= P(BLK, NA); | |
331 | ||
332 | return val; | |
333 | } | |
334 | ||
09026243 | 335 | u64 grt_latency_data(struct perf_event *event, u64 status) |
38aaf921 KL |
336 | { |
337 | union intel_x86_pebs_dse dse; | |
338 | ||
339 | dse.val = status; | |
340 | ||
09026243 KL |
341 | return __grt_latency_data(event, status, dse.ld_dse, |
342 | dse.ld_locked, dse.ld_stlb_miss, | |
343 | dse.ld_data_blk); | |
38aaf921 KL |
344 | } |
345 | ||
346 | /* Retrieve the latency data for the e-core of MTL */ |
09026243 | 347 | u64 cmt_latency_data(struct perf_event *event, u64 status) |
38aaf921 KL |
348 | { |
349 | union intel_x86_pebs_dse dse; | |
350 | ||
351 | dse.val = status; | |
352 | ||
09026243 KL |
353 | return __grt_latency_data(event, status, dse.mtl_dse, |
354 | dse.mtl_stlb_miss, dse.mtl_locked, | |
355 | dse.mtl_fwd_blk); | |
38aaf921 KL |
356 | } |
357 | ||
608f6976 KL |
358 | static u64 lnc_latency_data(struct perf_event *event, u64 status) |
359 | { | |
360 | union intel_x86_pebs_dse dse; | |
361 | union perf_mem_data_src src; | |
362 | u64 val; | |
363 | ||
364 | dse.val = status; | |
365 | ||
366 | /* LNC core latency data */ | |
367 | val = hybrid_var(event->pmu, pebs_data_source)[status & PERF_PEBS_DATA_SOURCE_MASK]; | |
368 | if (!val) | |
369 | val = P(OP, LOAD) | LEVEL(NA) | P(SNOOP, NA); | |
370 | ||
371 | if (dse.lnc_stlb_miss) | |
372 | val |= P(TLB, MISS) | P(TLB, L2); | |
373 | else | |
374 | val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2); | |
375 | ||
376 | if (dse.lnc_locked) | |
377 | val |= P(LOCK, LOCKED); | |
378 | ||
379 | if (dse.lnc_data_blk) | |
380 | val |= P(BLK, DATA); | |
381 | if (dse.lnc_addr_blk) | |
382 | val |= P(BLK, ADDR); | |
383 | if (!dse.lnc_data_blk && !dse.lnc_addr_blk) | |
384 | val |= P(BLK, NA); | |
385 | ||
386 | src.val = val; | |
387 | if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) | |
388 | src.mem_op = P(OP, STORE); | |
389 | ||
390 | return src.val; | |
391 | } | |
392 | ||
393 | u64 lnl_latency_data(struct perf_event *event, u64 status) | |
394 | { | |
395 | struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); | |
396 | ||
397 | if (pmu->pmu_type == hybrid_small) | |
398 | return cmt_latency_data(event, status); | |
399 | ||
400 | return lnc_latency_data(event, status); | |
401 | } | |
402 | ||
d3fe6f0a DM |
403 | u64 arl_h_latency_data(struct perf_event *event, u64 status) |
404 | { | |
405 | struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); | |
406 | ||
407 | if (pmu->pmu_type == hybrid_tiny) | |
408 | return cmt_latency_data(event, status); | |
409 | ||
410 | return lnl_latency_data(event, status); | |
411 | } | |
412 | ||
ccf170e9 | 413 | static u64 load_latency_data(struct perf_event *event, u64 status) |
f20093ee SE |
414 | { |
415 | union intel_x86_pebs_dse dse; | |
416 | u64 val; | |
f20093ee SE |
417 | |
418 | dse.val = status; | |
419 | ||
420 | /* | |
421 | * use the mapping table for bit 0-3 | |
422 | */ | |
ccf170e9 | 423 | val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse]; |
f20093ee SE |
424 | |
425 | /* | |
426 | * Nehalem models do not support TLB or Lock info | |
427 | */ | |
95298355 | 428 | if (x86_pmu.pebs_no_tlb) { |
f20093ee SE |
429 | val |= P(TLB, NA) | P(LOCK, NA); |
430 | return val; | |
431 | } | |
f20093ee | 432 | |
39a41278 | 433 | pebs_set_tlb_lock(&val, dse.ld_stlb_miss, dse.ld_locked); |
f20093ee | 434 | |
61b985e3 KL |
435 | /* |
436 | * Ice Lake and earlier models do not support block infos. | |
437 | */ | |
438 | if (!x86_pmu.pebs_block) { | |
439 | val |= P(BLK, NA); | |
440 | return val; | |
441 | } | |
442 | /* | |
443 | * bit 6: load was blocked since its data could not be forwarded | |
444 | * from a preceding store | |
445 | */ | |
446 | if (dse.ld_data_blk) | |
447 | val |= P(BLK, DATA); | |
448 | ||
449 | /* | |
450 | * bit 7: load was blocked due to potential address conflict with | |
451 | * a preceding store | |
452 | */ | |
453 | if (dse.ld_addr_blk) | |
454 | val |= P(BLK, ADDR); | |
455 | ||
456 | if (!dse.ld_data_blk && !dse.ld_addr_blk) | |
457 | val |= P(BLK, NA); | |
458 | ||
459 | return val; | |
460 | } | |
461 | ||
ccf170e9 | 462 | static u64 store_latency_data(struct perf_event *event, u64 status) |
61b985e3 KL |
463 | { |
464 | union intel_x86_pebs_dse dse; | |
d4bdb0be | 465 | union perf_mem_data_src src; |
61b985e3 KL |
466 | u64 val; |
467 | ||
468 | dse.val = status; | |
469 | ||
470 | /* | |
471 | * use the mapping table for bit 0-3 | |
472 | */ | |
ccf170e9 | 473 | val = hybrid_var(event->pmu, pebs_data_source)[dse.st_lat_dse]; |
61b985e3 | 474 | |
39a41278 | 475 | pebs_set_tlb_lock(&val, dse.st_lat_stlb_miss, dse.st_lat_locked); |
61b985e3 KL |
476 | |
477 | val |= P(BLK, NA); | |
478 | ||
d4bdb0be SE |
479 | /* |
480 | * the pebs_data_source table is only for loads | |
481 | * so override the mem_op to say STORE instead | |
482 | */ | |
483 | src.val = val; | |
484 | src.mem_op = P(OP,STORE); | |
485 | ||
486 | return src.val; | |
f20093ee SE |
487 | } |
488 | ||
ca037701 PZ |
489 | struct pebs_record_core { |
490 | u64 flags, ip; | |
491 | u64 ax, bx, cx, dx; | |
492 | u64 si, di, bp, sp; | |
493 | u64 r8, r9, r10, r11; | |
494 | u64 r12, r13, r14, r15; | |
495 | }; | |
496 | ||
497 | struct pebs_record_nhm { | |
498 | u64 flags, ip; | |
499 | u64 ax, bx, cx, dx; | |
500 | u64 si, di, bp, sp; | |
501 | u64 r8, r9, r10, r11; | |
502 | u64 r12, r13, r14, r15; | |
503 | u64 status, dla, dse, lat; | |
504 | }; | |
505 | ||
130768b8 AK |
506 | /* |
507 | * Same as pebs_record_nhm, with two additional fields. | |
508 | */ | |
509 | struct pebs_record_hsw { | |
748e86aa AK |
510 | u64 flags, ip; |
511 | u64 ax, bx, cx, dx; | |
512 | u64 si, di, bp, sp; | |
513 | u64 r8, r9, r10, r11; | |
514 | u64 r12, r13, r14, r15; | |
515 | u64 status, dla, dse, lat; | |
d2beea4a | 516 | u64 real_ip, tsx_tuning; |
748e86aa AK |
517 | }; |
518 | ||
519 | union hsw_tsx_tuning { | |
520 | struct { | |
521 | u32 cycles_last_block : 32, | |
522 | hle_abort : 1, | |
523 | rtm_abort : 1, | |
524 | instruction_abort : 1, | |
525 | non_instruction_abort : 1, | |
526 | retry : 1, | |
527 | data_conflict : 1, | |
528 | capacity_writes : 1, | |
529 | capacity_reads : 1; | |
530 | }; | |
531 | u64 value; | |
130768b8 AK |
532 | }; |
533 | ||
a405bad5 AK |
534 | #define PEBS_HSW_TSX_FLAGS 0xff00000000ULL |
535 | ||
2f7ebf2e AK |
536 | /* Same as HSW, plus TSC */ |
537 | ||
538 | struct pebs_record_skl { | |
539 | u64 flags, ip; | |
540 | u64 ax, bx, cx, dx; | |
541 | u64 si, di, bp, sp; | |
542 | u64 r8, r9, r10, r11; | |
543 | u64 r12, r13, r14, r15; | |
544 | u64 status, dla, dse, lat; | |
545 | u64 real_ip, tsx_tuning; | |
546 | u64 tsc; | |
547 | }; | |
548 | ||
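The four raw record layouts above only ever grow by appending fields, so their sizes follow directly from the field counts: 18, 22, 24 and 25 u64 words, i.e. 144, 176, 192 and 200 bytes. A small compile-time sanity check, with the largest (Skylake) layout re-declared locally purely for illustration:

```c
#include <stdint.h>

/* Local copy of the largest layout (Skylake), for the size check only. */
struct pebs_record_skl_example {
	uint64_t flags, ip;
	uint64_t ax, bx, cx, dx;
	uint64_t si, di, bp, sp;
	uint64_t r8, r9, r10, r11;
	uint64_t r12, r13, r14, r15;
	uint64_t status, dla, dse, lat;	/* added by the NHM format */
	uint64_t real_ip, tsx_tuning;	/* added by the HSW format */
	uint64_t tsc;			/* added by the SKL format */
};

/* 25 * 8 = 200 bytes; the core/NHM/HSW layouts are 144/176/192 bytes. */
_Static_assert(sizeof(struct pebs_record_skl_example) == 25 * sizeof(uint64_t),
	       "unexpected PEBS record size");
```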
de0428a7 | 549 | void init_debug_store_on_cpu(int cpu) |
ca037701 PZ |
550 | { |
551 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; | |
552 | ||
553 | if (!ds) | |
554 | return; | |
555 | ||
556 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, | |
557 | (u32)((u64)(unsigned long)ds), | |
558 | (u32)((u64)(unsigned long)ds >> 32)); | |
559 | } | |
560 | ||
de0428a7 | 561 | void fini_debug_store_on_cpu(int cpu) |
ca037701 PZ |
562 | { |
563 | if (!per_cpu(cpu_hw_events, cpu).ds) | |
564 | return; | |
565 | ||
566 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); | |
567 | } | |
568 | ||
9536c8d2 PZ |
569 | static DEFINE_PER_CPU(void *, insn_buffer); |
570 | ||
c1961a46 | 571 | static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot) |
5ee25c87 | 572 | { |
42f3bdc5 | 573 | unsigned long start = (unsigned long)cea; |
c1961a46 HD |
574 | phys_addr_t pa; |
575 | size_t msz = 0; | |
576 | ||
577 | pa = virt_to_phys(addr); | |
42f3bdc5 PZ |
578 | |
579 | preempt_disable(); | |
c1961a46 HD |
580 | for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE) |
581 | cea_set_pte(cea, pa, prot); | |
42f3bdc5 PZ |
582 | |
583 | /* | |
584 | * This is a cross-CPU update of the cpu_entry_area, we must shoot down | |
585 | * all TLB entries for it. | |
586 | */ | |
587 | flush_tlb_kernel_range(start, start + size); | |
588 | preempt_enable(); | |
c1961a46 HD |
589 | } |
590 | ||
591 | static void ds_clear_cea(void *cea, size_t size) | |
592 | { | |
42f3bdc5 | 593 | unsigned long start = (unsigned long)cea; |
c1961a46 HD |
594 | size_t msz = 0; |
595 | ||
42f3bdc5 | 596 | preempt_disable(); |
c1961a46 HD |
597 | for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE) |
598 | cea_set_pte(cea, 0, PAGE_NONE); | |
42f3bdc5 PZ |
599 | |
600 | flush_tlb_kernel_range(start, start + size); | |
601 | preempt_enable(); | |
c1961a46 HD |
602 | } |
603 | ||
604 | static void *dsalloc_pages(size_t size, gfp_t flags, int cpu) | |
605 | { | |
606 | unsigned int order = get_order(size); | |
96681fc3 | 607 | int node = cpu_to_node(cpu); |
c1961a46 HD |
608 | struct page *page; |
609 | ||
610 | page = __alloc_pages_node(node, flags | __GFP_ZERO, order); | |
611 | return page ? page_address(page) : NULL; | |
612 | } | |
613 | ||
614 | static void dsfree_pages(const void *buffer, size_t size) | |
615 | { | |
616 | if (buffer) | |
617 | free_pages((unsigned long)buffer, get_order(size)); | |
618 | } | |
619 | ||
620 | static int alloc_pebs_buffer(int cpu) | |
621 | { | |
622 | struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); | |
623 | struct debug_store *ds = hwev->ds; | |
624 | size_t bsiz = x86_pmu.pebs_buffer_size; | |
625 | int max, node = cpu_to_node(cpu); | |
1fc654cf | 626 | void *buffer, *insn_buff, *cea; |
5ee25c87 | 627 | |
acb727e0 | 628 | if (!x86_pmu.ds_pebs) |
5ee25c87 PZ |
629 | return 0; |
630 | ||
c1961a46 | 631 | buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu); |
5ee25c87 PZ |
632 | if (unlikely(!buffer)) |
633 | return -ENOMEM; | |
634 | ||
9536c8d2 PZ |
635 | /* |
636 | * HSW+ already provides us the eventing ip; no need to allocate this | |
637 | * buffer then. | |
638 | */ | |
639 | if (x86_pmu.intel_cap.pebs_format < 2) { | |
1fc654cf IM |
640 | insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); |
641 | if (!insn_buff) { | |
c1961a46 | 642 | dsfree_pages(buffer, bsiz); |
9536c8d2 PZ |
643 | return -ENOMEM; |
644 | } | |
1fc654cf | 645 | per_cpu(insn_buffer, cpu) = insn_buff; |
9536c8d2 | 646 | } |
c1961a46 HD |
647 | hwev->ds_pebs_vaddr = buffer; |
648 | /* Update the cpu entry area mapping */ | |
649 | cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; | |
650 | ds->pebs_buffer_base = (unsigned long) cea; | |
651 | ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL); | |
5ee25c87 | 652 | ds->pebs_index = ds->pebs_buffer_base; |
c1961a46 HD |
653 | max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size); |
654 | ds->pebs_absolute_maximum = ds->pebs_buffer_base + max; | |
5ee25c87 PZ |
655 | return 0; |
656 | } | |
657 | ||
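The `max` computation in alloc_pebs_buffer() simply rounds the buffer down to a whole number of records before placing pebs_absolute_maximum. A worked example with assumed numbers (a 64 KiB buffer and 200-byte records; both values are illustrative rather than guaranteed):

```c
#include <stdio.h>

int main(void)
{
	size_t bsiz   = 16 * 4096;	/* assumed pebs_buffer_size: 64 KiB */
	size_t record = 200;		/* assumed pebs_record_size         */
	size_t max    = record * (bsiz / record);

	/* 65536 / 200 = 327 whole records -> 65400 usable bytes, 136 unused */
	printf("usable=%zu unused=%zu\n", max, bsiz - max);
	return 0;
}
```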
b39f88ac PZ |
658 | static void release_pebs_buffer(int cpu) |
659 | { | |
c1961a46 | 660 | struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); |
c1961a46 | 661 | void *cea; |
b39f88ac | 662 | |
acb727e0 | 663 | if (!x86_pmu.ds_pebs) |
b39f88ac PZ |
664 | return; |
665 | ||
9536c8d2 PZ |
666 | kfree(per_cpu(insn_buffer, cpu)); |
667 | per_cpu(insn_buffer, cpu) = NULL; | |
668 | ||
c1961a46 HD |
669 | /* Clear the fixmap */ |
670 | cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; | |
671 | ds_clear_cea(cea, x86_pmu.pebs_buffer_size); | |
c1961a46 HD |
672 | dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size); |
673 | hwev->ds_pebs_vaddr = NULL; | |
b39f88ac PZ |
674 | } |
675 | ||
5ee25c87 PZ |
676 | static int alloc_bts_buffer(int cpu) |
677 | { | |
c1961a46 HD |
678 | struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); |
679 | struct debug_store *ds = hwev->ds; | |
680 | void *buffer, *cea; | |
681 | int max; | |
5ee25c87 PZ |
682 | |
683 | if (!x86_pmu.bts) | |
684 | return 0; | |
685 | ||
c1961a46 | 686 | buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu); |
44851541 DR |
687 | if (unlikely(!buffer)) { |
688 | WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__); | |
5ee25c87 | 689 | return -ENOMEM; |
44851541 | 690 | } |
c1961a46 HD |
691 | hwev->ds_bts_vaddr = buffer; |
692 | /* Update the fixmap */ | |
693 | cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; | |
694 | ds->bts_buffer_base = (unsigned long) cea; | |
695 | ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL); | |
5ee25c87 | 696 | ds->bts_index = ds->bts_buffer_base; |
2c991e40 HD |
697 | max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; |
698 | ds->bts_absolute_maximum = ds->bts_buffer_base + | |
699 | max * BTS_RECORD_SIZE; | |
700 | ds->bts_interrupt_threshold = ds->bts_absolute_maximum - | |
701 | (max / 16) * BTS_RECORD_SIZE; | |
5ee25c87 PZ |
702 | return 0; |
703 | } | |
704 | ||
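alloc_bts_buffer() carves its buffer into fixed 24-byte records (the three u64 fields of struct bts_record) and places the interrupt threshold one sixteenth of the records below the end. Assuming a 64 KiB BTS buffer (an assumption for illustration; the real BTS_BUFFER_SIZE is defined elsewhere), the arithmetic works out as follows:

```c
#include <stdio.h>

int main(void)
{
	unsigned long buffer = 16 * 4096;	/* assumed BTS buffer size */
	unsigned long rec    = 24;		/* BTS_RECORD_SIZE         */
	unsigned long max    = buffer / rec;	/* 2730 records            */
	unsigned long absmax = max * rec;	/* 65520 bytes             */
	unsigned long thresh = absmax - (max / 16) * rec; /* 61440 bytes   */

	printf("records=%lu absolute_max=+%lu threshold=+%lu\n",
	       max, absmax, thresh);
	return 0;
}
```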
b39f88ac PZ |
705 | static void release_bts_buffer(int cpu) |
706 | { | |
c1961a46 | 707 | struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); |
c1961a46 | 708 | void *cea; |
b39f88ac | 709 | |
efe951d3 | 710 | if (!x86_pmu.bts) |
b39f88ac PZ |
711 | return; |
712 | ||
c1961a46 HD |
713 | /* Clear the fixmap */ |
714 | cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; | |
715 | ds_clear_cea(cea, BTS_BUFFER_SIZE); | |
c1961a46 HD |
716 | dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE); |
717 | hwev->ds_bts_vaddr = NULL; | |
b39f88ac PZ |
718 | } |
719 | ||
65af94ba PZ |
720 | static int alloc_ds_buffer(int cpu) |
721 | { | |
c1961a46 | 722 | struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store; |
65af94ba | 723 | |
c1961a46 | 724 | memset(ds, 0, sizeof(*ds)); |
65af94ba | 725 | per_cpu(cpu_hw_events, cpu).ds = ds; |
65af94ba PZ |
726 | return 0; |
727 | } | |
728 | ||
729 | static void release_ds_buffer(int cpu) | |
730 | { | |
65af94ba | 731 | per_cpu(cpu_hw_events, cpu).ds = NULL; |
65af94ba PZ |
732 | } |
733 | ||
de0428a7 | 734 | void release_ds_buffers(void) |
ca037701 PZ |
735 | { |
736 | int cpu; | |
737 | ||
acb727e0 | 738 | if (!x86_pmu.bts && !x86_pmu.ds_pebs) |
ca037701 PZ |
739 | return; |
740 | ||
efe951d3 PZ |
741 | for_each_possible_cpu(cpu) |
742 | release_ds_buffer(cpu); | |
743 | ||
744 | for_each_possible_cpu(cpu) { | |
745 | /* | |
746 | * Again, ignore errors from offline CPUs; they will no longer | |
747 | * observe cpu_hw_events.ds and will not program the DS_AREA when | |
748 | * they come up. | |
749 | */ | |
ca037701 | 750 | fini_debug_store_on_cpu(cpu); |
efe951d3 | 751 | } |
ca037701 PZ |
752 | |
753 | for_each_possible_cpu(cpu) { | |
acb727e0 DM |
754 | if (x86_pmu.ds_pebs) |
755 | release_pebs_buffer(cpu); | |
b39f88ac | 756 | release_bts_buffer(cpu); |
ca037701 | 757 | } |
ca037701 PZ |
758 | } |
759 | ||
de0428a7 | 760 | void reserve_ds_buffers(void) |
ca037701 | 761 | { |
6809b6ea PZ |
762 | int bts_err = 0, pebs_err = 0; |
763 | int cpu; | |
764 | ||
765 | x86_pmu.bts_active = 0; | |
ca037701 | 766 | |
acb727e0 DM |
767 | if (x86_pmu.ds_pebs) |
768 | x86_pmu.pebs_active = 0; | |
769 | ||
770 | if (!x86_pmu.bts && !x86_pmu.ds_pebs) | |
f80c9e30 | 771 | return; |
ca037701 | 772 | |
6809b6ea PZ |
773 | if (!x86_pmu.bts) |
774 | bts_err = 1; | |
775 | ||
acb727e0 | 776 | if (!x86_pmu.ds_pebs) |
6809b6ea PZ |
777 | pebs_err = 1; |
778 | ||
ca037701 | 779 | for_each_possible_cpu(cpu) { |
6809b6ea PZ |
780 | if (alloc_ds_buffer(cpu)) { |
781 | bts_err = 1; | |
782 | pebs_err = 1; | |
783 | } | |
ca037701 | 784 | |
6809b6ea PZ |
785 | if (!bts_err && alloc_bts_buffer(cpu)) |
786 | bts_err = 1; | |
787 | ||
acb727e0 DM |
788 | if (x86_pmu.ds_pebs && !pebs_err && |
789 | alloc_pebs_buffer(cpu)) | |
6809b6ea | 790 | pebs_err = 1; |
5ee25c87 | 791 | |
6809b6ea | 792 | if (bts_err && pebs_err) |
5ee25c87 | 793 | break; |
6809b6ea PZ |
794 | } |
795 | ||
796 | if (bts_err) { | |
797 | for_each_possible_cpu(cpu) | |
798 | release_bts_buffer(cpu); | |
799 | } | |
ca037701 | 800 | |
acb727e0 | 801 | if (x86_pmu.ds_pebs && pebs_err) { |
6809b6ea PZ |
802 | for_each_possible_cpu(cpu) |
803 | release_pebs_buffer(cpu); | |
ca037701 PZ |
804 | } |
805 | ||
6809b6ea PZ |
806 | if (bts_err && pebs_err) { |
807 | for_each_possible_cpu(cpu) | |
808 | release_ds_buffer(cpu); | |
809 | } else { | |
810 | if (x86_pmu.bts && !bts_err) | |
811 | x86_pmu.bts_active = 1; | |
812 | ||
acb727e0 | 813 | if (x86_pmu.ds_pebs && !pebs_err) |
6809b6ea PZ |
814 | x86_pmu.pebs_active = 1; |
815 | ||
efe951d3 PZ |
816 | for_each_possible_cpu(cpu) { |
817 | /* | |
818 | * Ignore wrmsr_on_cpu() errors for offline CPUs; they | |
819 | * will get this call through intel_pmu_cpu_starting(). | |
820 | */ | |
ca037701 | 821 | init_debug_store_on_cpu(cpu); |
efe951d3 | 822 | } |
ca037701 | 823 | } |
ca037701 PZ |
824 | } |
825 | ||
826 | /* | |
827 | * BTS | |
828 | */ | |
829 | ||
de0428a7 | 830 | struct event_constraint bts_constraint = |
15c7ad51 | 831 | EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0); |
ca037701 | 832 | |
de0428a7 | 833 | void intel_pmu_enable_bts(u64 config) |
ca037701 PZ |
834 | { |
835 | unsigned long debugctlmsr; | |
836 | ||
837 | debugctlmsr = get_debugctlmsr(); | |
838 | ||
7c5ecaf7 PZ |
839 | debugctlmsr |= DEBUGCTLMSR_TR; |
840 | debugctlmsr |= DEBUGCTLMSR_BTS; | |
8062382c AS |
841 | if (config & ARCH_PERFMON_EVENTSEL_INT) |
842 | debugctlmsr |= DEBUGCTLMSR_BTINT; | |
ca037701 PZ |
843 | |
844 | if (!(config & ARCH_PERFMON_EVENTSEL_OS)) | |
7c5ecaf7 | 845 | debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS; |
ca037701 PZ |
846 | |
847 | if (!(config & ARCH_PERFMON_EVENTSEL_USR)) | |
7c5ecaf7 | 848 | debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR; |
ca037701 PZ |
849 | |
850 | update_debugctlmsr(debugctlmsr); | |
851 | } | |
852 | ||
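intel_pmu_enable_bts() builds the new IA32_DEBUGCTL value by OR-ing in the trace-enable bits and then masking tracing off for whichever privilege levels the event excludes. A small standalone sketch of that bit arithmetic, using locally defined constants with assumed bit positions rather than the kernel's DEBUGCTLMSR_* definitions:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative IA32_DEBUGCTL bits; positions are assumed, not taken from this file. */
#define TR          (1ULL << 6)		/* branch trace enable             */
#define BTS         (1ULL << 7)		/* store traces in the BTS buffer  */
#define BTINT       (1ULL << 8)		/* interrupt when the buffer fills */
#define BTS_OFF_OS  (1ULL << 9)		/* no tracing at CPL 0             */
#define BTS_OFF_USR (1ULL << 10)	/* no tracing at CPL > 0           */

int main(void)
{
	/* An event that counts user space only and wants an overflow interrupt. */
	int exclude_kernel = 1, exclude_user = 0, want_int = 1;
	uint64_t ctl = 0;

	ctl |= TR | BTS;
	if (want_int)
		ctl |= BTINT;
	if (exclude_kernel)
		ctl |= BTS_OFF_OS;
	if (exclude_user)
		ctl |= BTS_OFF_USR;

	printf("debugctl=0x%llx\n", (unsigned long long)ctl);	/* 0x3c0 */
	return 0;
}
```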
de0428a7 | 853 | void intel_pmu_disable_bts(void) |
ca037701 | 854 | { |
89cbc767 | 855 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
ca037701 PZ |
856 | unsigned long debugctlmsr; |
857 | ||
858 | if (!cpuc->ds) | |
859 | return; | |
860 | ||
861 | debugctlmsr = get_debugctlmsr(); | |
862 | ||
863 | debugctlmsr &= | |
7c5ecaf7 PZ |
864 | ~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT | |
865 | DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR); | |
ca037701 PZ |
866 | |
867 | update_debugctlmsr(debugctlmsr); | |
868 | } | |
869 | ||
de0428a7 | 870 | int intel_pmu_drain_bts_buffer(void) |
ca037701 | 871 | { |
89cbc767 | 872 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
ca037701 PZ |
873 | struct debug_store *ds = cpuc->ds; |
874 | struct bts_record { | |
875 | u64 from; | |
876 | u64 to; | |
877 | u64 flags; | |
878 | }; | |
15c7ad51 | 879 | struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; |
a09d31f4 | 880 | struct bts_record *at, *base, *top; |
ca037701 PZ |
881 | struct perf_output_handle handle; |
882 | struct perf_event_header header; | |
883 | struct perf_sample_data data; | |
a09d31f4 | 884 | unsigned long skip = 0; |
ca037701 PZ |
885 | struct pt_regs regs; |
886 | ||
887 | if (!event) | |
b0b2072d | 888 | return 0; |
ca037701 | 889 | |
6809b6ea | 890 | if (!x86_pmu.bts_active) |
b0b2072d | 891 | return 0; |
ca037701 | 892 | |
a09d31f4 AS |
893 | base = (struct bts_record *)(unsigned long)ds->bts_buffer_base; |
894 | top = (struct bts_record *)(unsigned long)ds->bts_index; | |
ca037701 | 895 | |
a09d31f4 | 896 | if (top <= base) |
b0b2072d | 897 | return 0; |
ca037701 | 898 | |
0e48026a SE |
899 | memset(®s, 0, sizeof(regs)); |
900 | ||
ca037701 PZ |
901 | ds->bts_index = ds->bts_buffer_base; |
902 | ||
fd0d000b | 903 | perf_sample_data_init(&data, 0, event->hw.last_period); |
ca037701 | 904 | |
a09d31f4 AS |
905 | /* |
906 | * BTS leaks kernel addresses in branches across the cpl boundary, | |
907 | * such as traps or system calls, so unless the user is asking for | |
908 | * kernel tracing (and right now it's not possible), we'd need to | |
909 | * filter them out. But first we need to count how many of those we | |
910 | * have in the current batch. This is an extra O(n) pass, however, | |
911 | * it's much faster than the other one especially considering that | |
912 | * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the | |
913 | * alloc_bts_buffer()). | |
914 | */ | |
915 | for (at = base; at < top; at++) { | |
916 | /* | |
917 | * Note that right now *this* BTS code only works if | |
918 | * attr::exclude_kernel is set, but let's keep this extra | |
919 | * check here in case that changes. | |
920 | */ | |
921 | if (event->attr.exclude_kernel && | |
922 | (kernel_ip(at->from) || kernel_ip(at->to))) | |
923 | skip++; | |
924 | } | |
925 | ||
ca037701 PZ |
926 | /* |
927 | * Prepare a generic sample, i.e. fill in the invariant fields. | |
928 | * We will overwrite the from and to address before we output | |
929 | * the sample. | |
930 | */ | |
e8d8a90f | 931 | rcu_read_lock(); |
f6e70715 NK |
932 | perf_prepare_sample(&data, event, ®s); |
933 | perf_prepare_header(&header, &data, event, ®s); | |
ca037701 | 934 | |
267fb273 PZ |
935 | if (perf_output_begin(&handle, &data, event, |
936 | header.size * (top - base - skip))) | |
e8d8a90f | 937 | goto unlock; |
ca037701 | 938 | |
a09d31f4 AS |
939 | for (at = base; at < top; at++) { |
940 | /* Filter out any records that contain kernel addresses. */ | |
941 | if (event->attr.exclude_kernel && | |
942 | (kernel_ip(at->from) || kernel_ip(at->to))) | |
943 | continue; | |
944 | ||
ca037701 PZ |
945 | data.ip = at->from; |
946 | data.addr = at->to; | |
947 | ||
948 | perf_output_sample(&handle, &header, &data, event); | |
949 | } | |
950 | ||
951 | perf_output_end(&handle); | |
952 | ||
953 | /* There's new data available. */ | |
954 | event->hw.interrupts++; | |
955 | event->pending_kill = POLL_IN; | |
e8d8a90f PZ |
956 | unlock: |
957 | rcu_read_unlock(); | |
b0b2072d | 958 | return 1; |
ca037701 PZ |
959 | } |
960 | ||
f9bdf1f9 | 961 | void intel_pmu_drain_pebs_buffer(void) |
9c964efa | 962 | { |
9dfa9a5c PZ |
963 | struct perf_sample_data data; |
964 | ||
314dfe10 | 965 | static_call(x86_pmu_drain_pebs)(NULL, &data); |
9c964efa YZ |
966 | } |
967 | ||
ca037701 PZ |
968 | /* |
969 | * PEBS | |
970 | */ | |
de0428a7 | 971 | struct event_constraint intel_core2_pebs_event_constraints[] = { |
af4bdcf6 AK |
972 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ |
973 | INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ | |
974 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ | |
975 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */ | |
976 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ | |
517e6341 | 977 | /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
23e3983a | 978 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), |
ca037701 PZ |
979 | EVENT_CONSTRAINT_END |
980 | }; | |
981 | ||
de0428a7 | 982 | struct event_constraint intel_atom_pebs_event_constraints[] = { |
af4bdcf6 AK |
983 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ |
984 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ | |
985 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ | |
517e6341 | 986 | /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
23e3983a | 987 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), |
673d188b SE |
988 | /* Allow all events as PEBS with no flags */ |
989 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), | |
17e31629 SE |
990 | EVENT_CONSTRAINT_END |
991 | }; | |
992 | ||
1fa64180 | 993 | struct event_constraint intel_slm_pebs_event_constraints[] = { |
33636732 | 994 | /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
23e3983a | 995 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1), |
86a04461 AK |
996 | /* Allow all events as PEBS with no flags */ |
997 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), | |
1fa64180 YZ |
998 | EVENT_CONSTRAINT_END |
999 | }; | |
1000 | ||
8b92c3a7 KL |
1001 | struct event_constraint intel_glm_pebs_event_constraints[] = { |
1002 | /* Allow all events as PEBS with no flags */ | |
1003 | INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), | |
1004 | EVENT_CONSTRAINT_END | |
1005 | }; | |
1006 | ||
f83d2f91 KL |
1007 | struct event_constraint intel_grt_pebs_event_constraints[] = { |
1008 | /* Allow all events as PEBS with no flags */ | |
cde643ff | 1009 | INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3), |
39a41278 | 1010 | INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf), |
f83d2f91 KL |
1011 | EVENT_CONSTRAINT_END |
1012 | }; | |
1013 | ||
de0428a7 | 1014 | struct event_constraint intel_nehalem_pebs_event_constraints[] = { |
f20093ee | 1015 | INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ |
af4bdcf6 AK |
1016 | INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ |
1017 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ | |
1018 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ | |
7d5d02da | 1019 | INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ |
af4bdcf6 AK |
1020 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ |
1021 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */ | |
1022 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ | |
1023 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ | |
1024 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ | |
1025 | INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ | |
517e6341 | 1026 | /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
23e3983a | 1027 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), |
17e31629 SE |
1028 | EVENT_CONSTRAINT_END |
1029 | }; | |
1030 | ||
de0428a7 | 1031 | struct event_constraint intel_westmere_pebs_event_constraints[] = { |
f20093ee | 1032 | INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ |
af4bdcf6 AK |
1033 | INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ |
1034 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ | |
1035 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ | |
7d5d02da | 1036 | INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ |
af4bdcf6 AK |
1037 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ |
1038 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ | |
1039 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ | |
1040 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ | |
1041 | INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ | |
1042 | INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ | |
517e6341 | 1043 | /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ |
23e3983a | 1044 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), |
ca037701 PZ |
1045 | EVENT_CONSTRAINT_END |
1046 | }; | |
1047 | ||
de0428a7 | 1048 | struct event_constraint intel_snb_pebs_event_constraints[] = { |
0dbc9479 | 1049 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ |
f20093ee | 1050 | INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ |
9ad64c0f | 1051 | INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ |
86a04461 | 1052 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ |
23e3983a | 1053 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), |
b63b4b45 MD |
1054 | INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ |
1055 | INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | |
1056 | INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ | |
1057 | INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ | |
86a04461 AK |
1058 | /* Allow all events as PEBS with no flags */ |
1059 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), | |
b06b3d49 LM |
1060 | EVENT_CONSTRAINT_END |
1061 | }; | |
1062 | ||
20a36e39 | 1063 | struct event_constraint intel_ivb_pebs_event_constraints[] = { |
0dbc9479 | 1064 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ |
f20093ee | 1065 | INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ |
9ad64c0f | 1066 | INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ |
86a04461 | 1067 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ |
23e3983a | 1068 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), |
72469764 | 1069 | /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ |
23e3983a | 1070 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), |
b63b4b45 MD |
1071 | INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ |
1072 | INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | |
1073 | INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ | |
1074 | INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ | |
86a04461 AK |
1075 | /* Allow all events as PEBS with no flags */ |
1076 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), | |
20a36e39 SE |
1077 | EVENT_CONSTRAINT_END |
1078 | }; | |
1079 | ||
3044318f | 1080 | struct event_constraint intel_hsw_pebs_event_constraints[] = { |
0dbc9479 | 1081 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ |
86a04461 AK |
1082 | INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ |
1083 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ | |
23e3983a | 1084 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), |
72469764 | 1085 | /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ |
23e3983a | 1086 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), |
86a04461 | 1087 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ |
b63b4b45 MD |
1088 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ |
1089 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ | |
1090 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */ | |
1091 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */ | |
1092 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */ | |
1093 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */ | |
1094 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */ | |
1095 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | |
1096 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */ | |
1097 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */ | |
9a92e16f AK |
1098 | /* Allow all events as PEBS with no flags */ |
1099 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), | |
1100 | EVENT_CONSTRAINT_END | |
1101 | }; | |
1102 | ||
b3e62463 SE |
1103 | struct event_constraint intel_bdw_pebs_event_constraints[] = { |
1104 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ | |
1105 | INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ | |
1106 | /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ | |
23e3983a | 1107 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), |
b3e62463 | 1108 | /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ |
23e3983a | 1109 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), |
b3e62463 SE |
1110 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ |
1111 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ | |
1112 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ | |
1113 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */ | |
1114 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */ | |
1115 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */ | |
1116 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */ | |
1117 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */ | |
1118 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | |
1119 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */ | |
1120 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */ | |
1121 | /* Allow all events as PEBS with no flags */ | |
1122 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), | |
1123 | EVENT_CONSTRAINT_END | |
1124 | }; | |
1125 | ||
1126 | ||
9a92e16f AK |
1127 | struct event_constraint intel_skl_pebs_event_constraints[] = { |
1128 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ | |
72469764 | 1129 | /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ |
23e3983a | 1130 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), |
442f5c74 | 1131 | /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */ |
23e3983a | 1132 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), |
9a92e16f AK |
1133 | INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */ |
1134 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ | |
1135 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ | |
1136 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ | |
1137 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */ | |
1138 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ | |
1139 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ | |
1140 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ | |
1141 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ | |
1142 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */ | |
1143 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */ | |
1144 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_L3_MISS_RETIRED.* */ | |
86a04461 AK |
1145 | /* Allow all events as PEBS with no flags */ |
1146 | INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), | |
3044318f AK |
1147 | EVENT_CONSTRAINT_END |
1148 | }; | |
1149 | ||
60176089 | 1150 | struct event_constraint intel_icl_pebs_event_constraints[] = { |
2de71ee1 SE |
1151 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x100000000ULL), /* old INST_RETIRED.PREC_DIST */ |
1152 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x0100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ | |
3d0c3953 | 1153 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */ |
60176089 KL |
1154 | |
1155 | INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | |
acc5568b KL |
1156 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ |
1157 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ | |
1158 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ | |
1159 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ | |
1160 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ | |
1161 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ | |
1162 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ | |
60176089 KL |
1163 | |
1164 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */ | |
1165 | ||
1166 | INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */ | |
1167 | ||
1168 | /* | |
1169 | * Everything else is handled by PMU_FL_PEBS_ALL, because we | |
1170 | * need the full constraints from the main table. | |
1171 | */ | |
1172 | ||
1173 | EVENT_CONSTRAINT_END | |
1174 | }; | |
1175 | ||
d4b5694c | 1176 | struct event_constraint intel_glc_pebs_event_constraints[] = { |
2de71ee1 | 1177 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ |
61b985e3 KL |
1178 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), |
1179 | ||
1180 | INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xfe), | |
1181 | INTEL_PLD_CONSTRAINT(0x1cd, 0xfe), | |
1182 | INTEL_PSD_CONSTRAINT(0x2cd, 0x1), | |
0916886b | 1183 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ |
a932aa0e KL |
1184 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ |
1185 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ | |
1186 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ | |
1187 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ | |
1188 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ | |
1189 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ | |
1190 | ||
1191 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), | |
1192 | ||
1193 | INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), | |
1194 | ||
1195 | /* | |
1196 | * Everything else is handled by PMU_FL_PEBS_ALL, because we | |
1197 | * need the full constraints from the main table. | |
1198 | */ | |
1199 | ||
1200 | EVENT_CONSTRAINT_END | |
1201 | }; | |
1202 | ||
1203 | struct event_constraint intel_lnc_pebs_event_constraints[] = { | |
1204 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ | |
1205 | INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), | |
1206 | ||
782cffee | 1207 | INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3fc), |
608f6976 | 1208 | INTEL_HYBRID_STLAT_CONSTRAINT(0x2cd, 0x3), |
a932aa0e | 1209 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ |
0916886b KL |
1210 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ |
1211 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ | |
1212 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ | |
1213 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ | |
1214 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ | |
1215 | INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ | |
61b985e3 KL |
1216 | |
1217 | INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), | |
1218 | ||
1219 | INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), | |
1220 | ||
1221 | /* | |
1222 | * Everything else is handled by PMU_FL_PEBS_ALL, because we | |
1223 | * need the full constraints from the main table. | |
1224 | */ | |
1225 | ||
1226 | EVENT_CONSTRAINT_END | |
1227 | }; | |
1228 | ||
de0428a7 | 1229 | struct event_constraint *intel_pebs_constraints(struct perf_event *event) |
ca037701 | 1230 | { |
24ee38ff | 1231 | struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints); |
ca037701 PZ |
1232 | struct event_constraint *c; |
1233 | ||
ab608344 | 1234 | if (!event->attr.precise_ip) |
ca037701 PZ |
1235 | return NULL; |
1236 | ||
24ee38ff KL |
1237 | if (pebs_constraints) { |
1238 | for_each_event_constraint(c, pebs_constraints) { | |
63b79f6e | 1239 | if (constraint_match(c, event->hw.config)) { |
9fac2cf3 | 1240 | event->hw.flags |= c->flags; |
ca037701 | 1241 | return c; |
9fac2cf3 | 1242 | } |
ca037701 PZ |
1243 | } |
1244 | } | |
1245 | ||
31962340 KL |
1246 | /* |
1247 | * Extended PEBS support | |
1248 | * Makes the PEBS code search the normal constraints. | |
1249 | */ | |
1250 | if (x86_pmu.flags & PMU_FL_PEBS_ALL) | |
1251 | return NULL; | |
1252 | ||
ca037701 PZ |
1253 | return &emptyconstraint; |
1254 | } | |
1255 | ||
09e61b4f PZ |
1256 | /* |
1257 | * We need the sched_task callback even for per-cpu events when we use | |
1258 | * the large interrupt threshold, such that we can provide PID and TID | |
1259 | * to PEBS samples. | |
1260 | */ | |
1261 | static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc) | |
1262 | { | |
42880f72 AS |
1263 | if (cpuc->n_pebs == cpuc->n_pebs_via_pt) |
1264 | return false; | |
1265 | ||
09e61b4f PZ |
1266 | return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs); |
1267 | } | |
1268 | ||
bd275681 | 1269 | void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) |
df6c3db8 JO |
1270 | { |
1271 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | |
1272 | ||
1273 | if (!sched_in && pebs_needs_sched_cb(cpuc)) | |
1274 | intel_pmu_drain_pebs_buffer(); | |
1275 | } | |
1276 | ||
09e61b4f PZ |
1277 | static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) |
1278 | { | |
1279 | struct debug_store *ds = cpuc->ds; | |
a23eb2fc | 1280 | int max_pebs_events = intel_pmu_max_num_pebs(cpuc->pmu); |
09e61b4f | 1281 | u64 threshold; |
ec71a398 KL |
1282 | int reserved; |
1283 | ||
42880f72 AS |
1284 | if (cpuc->n_pebs_via_pt) |
1285 | return; | |
1286 | ||
ec71a398 | 1287 | if (x86_pmu.flags & PMU_FL_PEBS_ALL) |
722e42e4 | 1288 | reserved = max_pebs_events + x86_pmu_max_num_counters_fixed(cpuc->pmu); |
ec71a398 | 1289 | else |
d4b294bf | 1290 | reserved = max_pebs_events; |
09e61b4f PZ |
1291 | |
1292 | if (cpuc->n_pebs == cpuc->n_large_pebs) { | |
1293 | threshold = ds->pebs_absolute_maximum - | |
c22497f5 | 1294 | reserved * cpuc->pebs_record_size; |
09e61b4f | 1295 | } else { |
c22497f5 | 1296 | threshold = ds->pebs_buffer_base + cpuc->pebs_record_size; |
09e61b4f PZ |
1297 | } |
1298 | ||
1299 | ds->pebs_interrupt_threshold = threshold; | |
1300 | } | |
1301 | ||
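pebs_update_threshold() has only two modes: with large PEBS the interrupt threshold is parked `reserved` records below the end of the buffer, otherwise it sits one record past the base so every record raises a PMI. A worked example with assumed numbers (64 KiB buffer, 200-byte records, 8 general-purpose plus 4 fixed counters reserved; all illustrative):

```c
#include <stdio.h>

int main(void)
{
	unsigned long base = 0;			/* pretend pebs_buffer_base    */
	unsigned long size = 16 * 4096;		/* assumed buffer size         */
	unsigned long rec  = 200;		/* assumed pebs_record_size    */
	unsigned long reserved = 8 + 4;		/* assumed GP + fixed counters */
	unsigned long absmax = base + rec * (size / rec);

	/* large PEBS: leave room for one record per reserved counter */
	unsigned long large  = absmax - reserved * rec;
	/* single PEBS: interrupt after the very first record */
	unsigned long single = base + rec;

	printf("large=+%lu single=+%lu\n", large, single);	/* +63000 / +200 */
	return 0;
}
```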
e02e9b03 KL |
1302 | #define PEBS_DATACFG_CNTRS(x) \ |
1303 | ((x >> PEBS_DATACFG_CNTR_SHIFT) & PEBS_DATACFG_CNTR_MASK) | |
1304 | ||
1305 | #define PEBS_DATACFG_CNTR_BIT(x) \ | |
1306 | (((1ULL << x) & PEBS_DATACFG_CNTR_MASK) << PEBS_DATACFG_CNTR_SHIFT) | |
1307 | ||
1308 | #define PEBS_DATACFG_FIX(x) \ | |
1309 | ((x >> PEBS_DATACFG_FIX_SHIFT) & PEBS_DATACFG_FIX_MASK) | |
1310 | ||
1311 | #define PEBS_DATACFG_FIX_BIT(x) \ | |
1312 | (((1ULL << (x)) & PEBS_DATACFG_FIX_MASK) \ | |
1313 | << PEBS_DATACFG_FIX_SHIFT) | |
1314 | ||
c22497f5 KL |
1315 | static void adaptive_pebs_record_size_update(void) |
1316 | { | |
1317 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | |
1318 | u64 pebs_data_cfg = cpuc->pebs_data_cfg; | |
1319 | int sz = sizeof(struct pebs_basic); | |
1320 | ||
1321 | if (pebs_data_cfg & PEBS_DATACFG_MEMINFO) | |
1322 | sz += sizeof(struct pebs_meminfo); | |
1323 | if (pebs_data_cfg & PEBS_DATACFG_GP) | |
1324 | sz += sizeof(struct pebs_gprs); | |
1325 | if (pebs_data_cfg & PEBS_DATACFG_XMMS) | |
1326 | sz += sizeof(struct pebs_xmm); | |
1327 | if (pebs_data_cfg & PEBS_DATACFG_LBRS) | |
5624986d | 1328 | sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry); |
e02e9b03 KL |
1329 | if (pebs_data_cfg & (PEBS_DATACFG_METRICS | PEBS_DATACFG_CNTR)) { |
1330 | sz += sizeof(struct pebs_cntr_header); | |
1331 | ||
1332 | /* Metrics base and Metrics Data */ | |
1333 | if (pebs_data_cfg & PEBS_DATACFG_METRICS) | |
1334 | sz += 2 * sizeof(u64); | |
1335 | ||
1336 | if (pebs_data_cfg & PEBS_DATACFG_CNTR) { | |
1337 | sz += (hweight64(PEBS_DATACFG_CNTRS(pebs_data_cfg)) + | |
1338 | hweight64(PEBS_DATACFG_FIX(pebs_data_cfg))) * | |
1339 | sizeof(u64); | |
1340 | } | |
1341 | } | |
c22497f5 KL |
1342 | |
1343 | cpuc->pebs_record_size = sz; | |
1344 | } | |
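/*
 * Editor's note: a worked example of the sizing above, with assumed
 * configuration bits rather than a configuration taken from a real
 * event. With only PEBS_DATACFG_MEMINFO and PEBS_DATACFG_GP set, the
 * record size is sizeof(struct pebs_basic) + sizeof(struct pebs_meminfo)
 * + sizeof(struct pebs_gprs). Adding PEBS_DATACFG_LBRS with lbr_nr == 32
 * grows it by 32 * sizeof(struct lbr_entry). Every record in the DS
 * buffer then has this one size, which is why the drain code can walk
 * the buffer in cpuc->pebs_record_size steps.
 */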
1345 | ||
e02e9b03 KL |
1346 | static void __intel_pmu_pebs_update_cfg(struct perf_event *event, |
1347 | int idx, u64 *pebs_data_cfg) | |
1348 | { | |
1349 | if (is_metric_event(event)) { | |
1350 | *pebs_data_cfg |= PEBS_DATACFG_METRICS; | |
1351 | return; | |
1352 | } | |
1353 | ||
1354 | *pebs_data_cfg |= PEBS_DATACFG_CNTR; | |
1355 | ||
1356 | if (idx >= INTEL_PMC_IDX_FIXED) | |
1357 | *pebs_data_cfg |= PEBS_DATACFG_FIX_BIT(idx - INTEL_PMC_IDX_FIXED); | |
1358 | else | |
1359 | *pebs_data_cfg |= PEBS_DATACFG_CNTR_BIT(idx); | |
1360 | } | |
1361 | ||
1362 | ||
0a655793 | 1363 | void intel_pmu_pebs_late_setup(struct cpu_hw_events *cpuc) |
e02e9b03 | 1364 | { |
e02e9b03 KL |
1365 | struct perf_event *event; |
1366 | u64 pebs_data_cfg = 0; | |
1367 | int i; | |
1368 | ||
1369 | for (i = 0; i < cpuc->n_events; i++) { | |
1370 | event = cpuc->event_list[i]; | |
1371 | if (!is_pebs_counter_event_group(event)) | |
1372 | continue; | |
1373 | __intel_pmu_pebs_update_cfg(event, cpuc->assign[i], &pebs_data_cfg); | |
1374 | } | |
1375 | ||
1376 | if (pebs_data_cfg & ~cpuc->pebs_data_cfg) | |
1377 | cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW; | |
1378 | } | |
1379 | ||
c22497f5 | 1380 | #define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \ |
2a6c6b7d KL |
1381 | PERF_SAMPLE_PHYS_ADDR | \ |
1382 | PERF_SAMPLE_WEIGHT_TYPE | \ | |
76a5433f KL |
1383 | PERF_SAMPLE_TRANSACTION | \ |
1384 | PERF_SAMPLE_DATA_PAGE_SIZE) | |
c22497f5 KL |
1385 | |
1386 | static u64 pebs_update_adaptive_cfg(struct perf_event *event) | |
1387 | { | |
1388 | struct perf_event_attr *attr = &event->attr; | |
1389 | u64 sample_type = attr->sample_type; | |
1390 | u64 pebs_data_cfg = 0; | |
1391 | bool gprs, tsx_weight; | |
1392 | ||
1393 | if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) && | |
1394 | attr->precise_ip > 1) | |
1395 | return pebs_data_cfg; | |
1396 | ||
1397 | if (sample_type & PERF_PEBS_MEMINFO_TYPE) | |
1398 | pebs_data_cfg |= PEBS_DATACFG_MEMINFO; | |
1399 | ||
1400 | /* | |
1401 | * We need GPRs when: | |
1402 | * + the user requested them |
1403 | * + precise_ip < 2, for the non-event IP |
1404 | * + for RTM TSX weight we need GPRs for the abort code |
1405 | */ | |
71dcc11c DM |
1406 | gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) && |
1407 | (attr->sample_regs_intr & PEBS_GP_REGS)) || | |
1408 | ((sample_type & PERF_SAMPLE_REGS_USER) && | |
1409 | (attr->sample_regs_user & PEBS_GP_REGS)); | |
c22497f5 | 1410 | |
2a6c6b7d | 1411 | tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) && |
c22497f5 KL |
1412 | ((attr->config & INTEL_ARCH_EVENT_MASK) == |
1413 | x86_pmu.rtm_abort_event); | |
1414 | ||
1415 | if (gprs || (attr->precise_ip < 2) || tsx_weight) | |
1416 | pebs_data_cfg |= PEBS_DATACFG_GP; | |
1417 | ||
1418 | if ((sample_type & PERF_SAMPLE_REGS_INTR) && | |
dce86ac7 | 1419 | (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK)) |
c22497f5 KL |
1420 | pebs_data_cfg |= PEBS_DATACFG_XMMS; |
1421 | ||
1422 | if (sample_type & PERF_SAMPLE_BRANCH_STACK) { | |
1423 | /* | |
1424 | * For now always log all LBRs. Could configure this | |
1425 | * later. | |
1426 | */ | |
1427 | pebs_data_cfg |= PEBS_DATACFG_LBRS | | |
1428 | ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT); | |
1429 | } | |
1430 | ||
1431 | return pebs_data_cfg; | |
1432 | } | |
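/*
 * Editor's note: two illustrative outcomes of the helper above; the
 * event attributes are made up for the example. An event sampling only
 * PERF_SAMPLE_IP | PERF_SAMPLE_TIME with precise_ip > 1 returns 0, so
 * a basic record suffices. An event sampling PERF_SAMPLE_DATA_SRC |
 * PERF_SAMPLE_REGS_INTR with both general-purpose and XMM registers
 * requested ends up with PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP |
 * PEBS_DATACFG_XMMS.
 */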
1433 | ||
09e61b4f | 1434 | static void |
c22497f5 KL |
1435 | pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, |
1436 | struct perf_event *event, bool add) | |
09e61b4f | 1437 | { |
bd275681 | 1438 | struct pmu *pmu = event->pmu; |
b752ea0c | 1439 | |
b6a32f02 | 1440 | /* |
312be9fc KL |
1441 | * Make sure we get updated with the first PEBS event. |
1442 | * During removal, ->pebs_data_cfg is still valid for | |
1443 | * the last PEBS event. Don't clear it. | |
b6a32f02 | 1444 | */ |
312be9fc | 1445 | if ((cpuc->n_pebs == 1) && add) |
b752ea0c | 1446 | cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW; |
b6a32f02 | 1447 | |
09e61b4f PZ |
1448 | if (needed_cb != pebs_needs_sched_cb(cpuc)) { |
1449 | if (!needed_cb) | |
1450 | perf_sched_cb_inc(pmu); | |
1451 | else | |
1452 | perf_sched_cb_dec(pmu); | |
1453 | ||
b752ea0c | 1454 | cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW; |
09e61b4f | 1455 | } |
b6a32f02 | 1456 | |
c22497f5 KL |
1457 | /* |
1458 | * The PEBS record doesn't shrink on pmu::del(). Doing so would require | |
1459 | * iterating all remaining PEBS events to reconstruct the config. | |
1460 | */ | |
1461 | if (x86_pmu.intel_cap.pebs_baseline && add) { | |
1462 | u64 pebs_data_cfg; | |
1463 | ||
c22497f5 | 1464 | pebs_data_cfg = pebs_update_adaptive_cfg(event); |
b752ea0c KL |
1465 | /* |
1466 | * Be sure to update the thresholds when we change the record. | |
1467 | */ | |
1468 | if (pebs_data_cfg & ~cpuc->pebs_data_cfg) | |
1469 | cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW; | |
c22497f5 | 1470 | } |
09e61b4f PZ |
1471 | } |
1472 | ||
68f7082f | 1473 | void intel_pmu_pebs_add(struct perf_event *event) |
3569c0d7 | 1474 | { |
09e61b4f PZ |
1475 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1476 | struct hw_perf_event *hwc = &event->hw; | |
1477 | bool needed_cb = pebs_needs_sched_cb(cpuc); | |
1478 | ||
1479 | cpuc->n_pebs++; | |
174afc3e | 1480 | if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) |
09e61b4f | 1481 | cpuc->n_large_pebs++; |
42880f72 AS |
1482 | if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT) |
1483 | cpuc->n_pebs_via_pt++; | |
09e61b4f | 1484 | |
c22497f5 | 1485 | pebs_update_state(needed_cb, cpuc, event, true); |
3569c0d7 YZ |
1486 | } |
1487 | ||
42880f72 AS |
1488 | static void intel_pmu_pebs_via_pt_disable(struct perf_event *event) |
1489 | { | |
1490 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | |
1491 | ||
1492 | if (!is_pebs_pt(event)) | |
1493 | return; | |
1494 | ||
1495 | if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK)) | |
1496 | cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK; | |
1497 | } | |
1498 | ||
1499 | static void intel_pmu_pebs_via_pt_enable(struct perf_event *event) | |
1500 | { | |
1501 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | |
1502 | struct hw_perf_event *hwc = &event->hw; | |
1503 | struct debug_store *ds = cpuc->ds; | |
4c58d922 LX |
1504 | u64 value = ds->pebs_event_reset[hwc->idx]; |
1505 | u32 base = MSR_RELOAD_PMC0; | |
1506 | unsigned int idx = hwc->idx; | |
42880f72 AS |
1507 | |
1508 | if (!is_pebs_pt(event)) | |
1509 | return; | |
1510 | ||
1511 | if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) | |
1512 | cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD; | |
1513 | ||
1514 | cpuc->pebs_enabled |= PEBS_OUTPUT_PT; | |
1515 | ||
4c58d922 LX |
1516 | if (hwc->idx >= INTEL_PMC_IDX_FIXED) { |
1517 | base = MSR_RELOAD_FIXED_CTR0; | |
1518 | idx = hwc->idx - INTEL_PMC_IDX_FIXED; | |
2145e77f KL |
1519 | if (x86_pmu.intel_cap.pebs_format < 5) |
1520 | value = ds->pebs_event_reset[MAX_PEBS_EVENTS_FMT4 + idx]; | |
1521 | else | |
1522 | value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx]; | |
4c58d922 | 1523 | } |
78255eb2 | 1524 | wrmsrq(base + idx, value); |
42880f72 AS |
1525 | } |
1526 | ||
b752ea0c KL |
1527 | static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc) |
1528 | { | |
1529 | if (cpuc->n_pebs == cpuc->n_large_pebs && | |
1530 | cpuc->n_pebs != cpuc->n_pebs_via_pt) | |
1531 | intel_pmu_drain_pebs_buffer(); | |
1532 | } | |
1533 | ||
de0428a7 | 1534 | void intel_pmu_pebs_enable(struct perf_event *event) |
ca037701 | 1535 | { |
89cbc767 | 1536 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
b752ea0c | 1537 | u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW; |
ef21f683 | 1538 | struct hw_perf_event *hwc = &event->hw; |
851559e3 | 1539 | struct debug_store *ds = cpuc->ds; |
4c58d922 | 1540 | unsigned int idx = hwc->idx; |
09e61b4f | 1541 | |
ca037701 PZ |
1542 | hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; |
1543 | ||
ad0e6cfe | 1544 | cpuc->pebs_enabled |= 1ULL << hwc->idx; |
f20093ee | 1545 | |
60176089 | 1546 | if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5)) |
f20093ee | 1547 | cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32); |
9ad64c0f SE |
1548 | else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) |
1549 | cpuc->pebs_enabled |= 1ULL << 63; | |
851559e3 | 1550 | |
c22497f5 KL |
1551 | if (x86_pmu.intel_cap.pebs_baseline) { |
1552 | hwc->config |= ICL_EVENTSEL_ADAPTIVE; | |
b752ea0c KL |
1553 | if (pebs_data_cfg != cpuc->active_pebs_data_cfg) { |
1554 | /* | |
1555 | * drain_pebs() assumes uniform record size; | |
1556 | * hence we need to drain when changing said | |
1557 | * size. | |
1558 | */ | |
9f3de72a | 1559 | intel_pmu_drain_pebs_buffer(); |
b752ea0c | 1560 | adaptive_pebs_record_size_update(); |
78255eb2 | 1561 | wrmsrq(MSR_PEBS_DATA_CFG, pebs_data_cfg); |
b752ea0c | 1562 | cpuc->active_pebs_data_cfg = pebs_data_cfg; |
c22497f5 KL |
1563 | } |
1564 | } | |
b752ea0c KL |
1565 | if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) { |
1566 | cpuc->pebs_data_cfg = pebs_data_cfg; | |
1567 | pebs_update_threshold(cpuc); | |
1568 | } | |
c22497f5 | 1569 | |
2145e77f KL |
1570 | if (idx >= INTEL_PMC_IDX_FIXED) { |
1571 | if (x86_pmu.intel_cap.pebs_format < 5) | |
1572 | idx = MAX_PEBS_EVENTS_FMT4 + (idx - INTEL_PMC_IDX_FIXED); | |
1573 | else | |
1574 | idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED); | |
1575 | } | |
4c58d922 | 1576 | |
3569c0d7 | 1577 | /* |
09e61b4f PZ |
1578 | * Use auto-reload if possible to save an MSR write in the PMI. |
1579 | * This must be done in pmu::start(), because of PERF_EVENT_IOC_PERIOD. |
3569c0d7 | 1580 | */ |
851559e3 | 1581 | if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { |
ec71a398 | 1582 | ds->pebs_event_reset[idx] = |
851559e3 | 1583 | (u64)(-hwc->sample_period) & x86_pmu.cntval_mask; |
dc853e26 | 1584 | } else { |
4c58d922 | 1585 | ds->pebs_event_reset[idx] = 0; |
851559e3 | 1586 | } |
42880f72 AS |
1587 | |
1588 | intel_pmu_pebs_via_pt_enable(event); | |
09e61b4f PZ |
1589 | } |
1590 | ||
68f7082f | 1591 | void intel_pmu_pebs_del(struct perf_event *event) |
09e61b4f PZ |
1592 | { |
1593 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | |
1594 | struct hw_perf_event *hwc = &event->hw; | |
1595 | bool needed_cb = pebs_needs_sched_cb(cpuc); | |
1596 | ||
1597 | cpuc->n_pebs--; | |
174afc3e | 1598 | if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) |
09e61b4f | 1599 | cpuc->n_large_pebs--; |
42880f72 AS |
1600 | if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT) |
1601 | cpuc->n_pebs_via_pt--; | |
3569c0d7 | 1602 | |
c22497f5 | 1603 | pebs_update_state(needed_cb, cpuc, event, false); |
ca037701 PZ |
1604 | } |
1605 | ||
de0428a7 | 1606 | void intel_pmu_pebs_disable(struct perf_event *event) |
ca037701 | 1607 | { |
89cbc767 | 1608 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
ef21f683 | 1609 | struct hw_perf_event *hwc = &event->hw; |
2a853e11 | 1610 | |
b752ea0c | 1611 | intel_pmu_drain_large_pebs(cpuc); |
ca037701 | 1612 | |
ad0e6cfe | 1613 | cpuc->pebs_enabled &= ~(1ULL << hwc->idx); |
983433b5 | 1614 | |
60176089 KL |
1615 | if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && |
1616 | (x86_pmu.version < 5)) | |
983433b5 | 1617 | cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); |
b371b594 | 1618 | else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) |
983433b5 SE |
1619 | cpuc->pebs_enabled &= ~(1ULL << 63); |
1620 | ||
42880f72 AS |
1621 | intel_pmu_pebs_via_pt_disable(event); |
1622 | ||
4807e3d5 | 1623 | if (cpuc->enabled) |
78255eb2 | 1624 | wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); |
ca037701 PZ |
1625 | |
1626 | hwc->config |= ARCH_PERFMON_EVENTSEL_INT; | |
1627 | } | |
1628 | ||
de0428a7 | 1629 | void intel_pmu_pebs_enable_all(void) |
ca037701 | 1630 | { |
89cbc767 | 1631 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
ca037701 PZ |
1632 | |
1633 | if (cpuc->pebs_enabled) | |
78255eb2 | 1634 | wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); |
ca037701 PZ |
1635 | } |
1636 | ||
de0428a7 | 1637 | void intel_pmu_pebs_disable_all(void) |
ca037701 | 1638 | { |
89cbc767 | 1639 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
ca037701 PZ |
1640 | |
1641 | if (cpuc->pebs_enabled) | |
c22ac2a3 | 1642 | __intel_pmu_pebs_disable_all(); |
ca037701 PZ |
1643 | } |
1644 | ||
ef21f683 PZ |
1645 | static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) |
1646 | { | |
89cbc767 | 1647 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
ef21f683 PZ |
1648 | unsigned long from = cpuc->lbr_entries[0].from; |
1649 | unsigned long old_to, to = cpuc->lbr_entries[0].to; | |
1650 | unsigned long ip = regs->ip; | |
57d1c0c0 | 1651 | int is_64bit = 0; |
9536c8d2 | 1652 | void *kaddr; |
6ba48ff4 | 1653 | int size; |
ef21f683 | 1654 | |
8db909a7 PZ |
1655 | /* |
1656 | * We don't need to fix up if the PEBS assist is fault-like |
1657 | */ | |
1658 | if (!x86_pmu.intel_cap.pebs_trap) | |
1659 | return 1; | |
1660 | ||
a562b187 PZ |
1661 | /* |
1662 | * No LBR entry, no basic block, no rewinding | |
1663 | */ | |
ef21f683 PZ |
1664 | if (!cpuc->lbr_stack.nr || !from || !to) |
1665 | return 0; | |
1666 | ||
a562b187 PZ |
1667 | /* |
1668 | * Basic blocks should never cross user/kernel boundaries | |
1669 | */ | |
1670 | if (kernel_ip(ip) != kernel_ip(to)) | |
1671 | return 0; | |
1672 | ||
1673 | /* | |
1674 | * unsigned math, either ip is before the start (impossible) or | |
1675 | * the basic block is larger than 1 page (sanity) | |
1676 | */ | |
9536c8d2 | 1677 | if ((ip - to) > PEBS_FIXUP_SIZE) |
ef21f683 PZ |
1678 | return 0; |
1679 | ||
1680 | /* | |
1681 | * We sampled a branch insn, rewind using the LBR stack | |
1682 | */ | |
1683 | if (ip == to) { | |
d07bdfd3 | 1684 | set_linear_ip(regs, from); |
ef21f683 PZ |
1685 | return 1; |
1686 | } | |
1687 | ||
6ba48ff4 | 1688 | size = ip - to; |
9536c8d2 | 1689 | if (!kernel_ip(ip)) { |
6ba48ff4 | 1690 | int bytes; |
9536c8d2 PZ |
1691 | u8 *buf = this_cpu_read(insn_buffer); |
1692 | ||
6ba48ff4 | 1693 | /* 'size' must fit our buffer, see above */ |
9536c8d2 | 1694 | bytes = copy_from_user_nmi(buf, (void __user *)to, size); |
0a196848 | 1695 | if (bytes != 0) |
9536c8d2 PZ |
1696 | return 0; |
1697 | ||
1698 | kaddr = buf; | |
1699 | } else { | |
1700 | kaddr = (void *)to; | |
1701 | } | |
1702 | ||
ef21f683 PZ |
1703 | do { |
1704 | struct insn insn; | |
ef21f683 PZ |
1705 | |
1706 | old_to = to; | |
ef21f683 | 1707 | |
57d1c0c0 | 1708 | #ifdef CONFIG_X86_64 |
375d4bfd | 1709 | is_64bit = kernel_ip(to) || any_64bit_mode(regs); |
57d1c0c0 | 1710 | #endif |
6ba48ff4 | 1711 | insn_init(&insn, kaddr, size, is_64bit); |
2ff49881 | 1712 | |
6ba48ff4 | 1713 | /* |
2ff49881 BP |
1714 | * Make sure there was not a problem decoding the instruction. |
1715 | * This is doubly important because we have an infinite loop if | |
1716 | * insn.length=0. | |
6ba48ff4 | 1717 | */ |
2ff49881 | 1718 | if (insn_get_length(&insn)) |
6ba48ff4 | 1719 | break; |
9536c8d2 | 1720 | |
ef21f683 | 1721 | to += insn.length; |
9536c8d2 | 1722 | kaddr += insn.length; |
6ba48ff4 | 1723 | size -= insn.length; |
ef21f683 PZ |
1724 | } while (to < ip); |
1725 | ||
1726 | if (to == ip) { | |
d07bdfd3 | 1727 | set_linear_ip(regs, old_to); |
ef21f683 PZ |
1728 | return 1; |
1729 | } | |
1730 | ||
a562b187 PZ |
1731 | /* |
1732 | * Even though we decoded the basic block, the instruction stream | |
1733 | * never matched the given IP, either the TO or the IP got corrupted. | |
1734 | */ | |
ef21f683 PZ |
1735 | return 0; |
1736 | } | |
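/*
 * Editor's note: a sketch of the rewind above with assumed addresses
 * and instruction lengths. Say the PEBS-reported ip is 0x1010 and the
 * last LBR entry has to == 0x1000. Decoding forward from 0x1000 over
 * instructions of length 4, 6 and 6 bytes leaves old_to == 0x100a when
 * to reaches 0x1010, so regs->ip is rewound to 0x100a, the instruction
 * that actually retired, and the caller marks the sample with
 * PERF_EFLAGS_EXACT.
 */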
1737 | ||
48f38aa4 | 1738 | static inline u64 intel_get_tsx_weight(u64 tsx_tuning) |
748e86aa | 1739 | { |
48f38aa4 AK |
1740 | if (tsx_tuning) { |
1741 | union hsw_tsx_tuning tsx = { .value = tsx_tuning }; | |
748e86aa AK |
1742 | return tsx.cycles_last_block; |
1743 | } | |
1744 | return 0; | |
1745 | } | |
1746 | ||
48f38aa4 | 1747 | static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax) |
a405bad5 | 1748 | { |
48f38aa4 | 1749 | u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32; |
a405bad5 AK |
1750 | |
1751 | /* For RTM XABORTs also log the abort code from AX */ | |
48f38aa4 AK |
1752 | if ((txn & PERF_TXN_TRANSACTION) && (ax & 1)) |
1753 | txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT; | |
a405bad5 AK |
1754 | return txn; |
1755 | } | |
1756 | ||
c22497f5 KL |
1757 | static inline u64 get_pebs_status(void *n) |
1758 | { | |
1759 | if (x86_pmu.intel_cap.pebs_format < 4) | |
1760 | return ((struct pebs_record_nhm *)n)->status; | |
1761 | return ((struct pebs_basic *)n)->applicable_counters; | |
1762 | } | |
1763 | ||
48f38aa4 AK |
1764 | #define PERF_X86_EVENT_PEBS_HSW_PREC \ |
1765 | (PERF_X86_EVENT_PEBS_ST_HSW | \ | |
1766 | PERF_X86_EVENT_PEBS_LD_HSW | \ | |
1767 | PERF_X86_EVENT_PEBS_NA_HSW) | |
1768 | ||
1769 | static u64 get_data_src(struct perf_event *event, u64 aux) | |
1770 | { | |
1771 | u64 val = PERF_MEM_NA; | |
1772 | int fl = event->hw.flags; | |
1773 | bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC); | |
1774 | ||
1775 | if (fl & PERF_X86_EVENT_PEBS_LDLAT) | |
ccf170e9 | 1776 | val = load_latency_data(event, aux); |
61b985e3 | 1777 | else if (fl & PERF_X86_EVENT_PEBS_STLAT) |
ccf170e9 | 1778 | val = store_latency_data(event, aux); |
39a41278 KL |
1779 | else if (fl & PERF_X86_EVENT_PEBS_LAT_HYBRID) |
1780 | val = x86_pmu.pebs_latency_data(event, aux); | |
48f38aa4 AK |
1781 | else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC)) |
1782 | val = precise_datala_hsw(event, aux); | |
1783 | else if (fst) | |
1784 | val = precise_store_data(aux); | |
1785 | return val; | |
1786 | } | |
1787 | ||
89e97eb8 KL |
1788 | static void setup_pebs_time(struct perf_event *event, |
1789 | struct perf_sample_data *data, | |
1790 | u64 tsc) | |
1791 | { | |
1792 | /* Converting to a user-defined clock is not supported yet. */ | |
1793 | if (event->attr.use_clockid != 0) | |
1794 | return; | |
1795 | ||
1796 | /* | |
1797 | * The conversion is not supported when the TSC is unstable. |
1798 | * An unstable TSC is a corner case and very unlikely to |
1799 | * happen. If it does, the TSC in the PEBS record is |
1800 | * dropped and the time falls back to perf_event_clock(). |
1801 | */ | |
1802 | if (!using_native_sched_clock() || !sched_clock_stable()) | |
1803 | return; | |
1804 | ||
1805 | data->time = native_sched_clock_from_tsc(tsc) + __sched_clock_offset; | |
1806 | data->sample_flags |= PERF_SAMPLE_TIME; | |
1807 | } | |
1808 | ||
76a5433f KL |
1809 | #define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \ |
1810 | PERF_SAMPLE_PHYS_ADDR | \ | |
1811 | PERF_SAMPLE_DATA_PAGE_SIZE) | |
1812 | ||
c22497f5 | 1813 | static void setup_pebs_fixed_sample_data(struct perf_event *event, |
43cf7631 YZ |
1814 | struct pt_regs *iregs, void *__pebs, |
1815 | struct perf_sample_data *data, | |
1816 | struct pt_regs *regs) | |
2b0b5c6f PZ |
1817 | { |
1818 | /* | |
d2beea4a PZ |
1819 | * We cast to the biggest pebs_record but are careful not to |
1820 | * unconditionally access the 'extra' entries. | |
2b0b5c6f | 1821 | */ |
89cbc767 | 1822 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
2f7ebf2e | 1823 | struct pebs_record_skl *pebs = __pebs; |
f20093ee | 1824 | u64 sample_type; |
48f38aa4 | 1825 | int fll; |
2b0b5c6f | 1826 | |
21509084 YZ |
1827 | if (pebs == NULL) |
1828 | return; | |
1829 | ||
c8aab2e0 | 1830 | sample_type = event->attr.sample_type; |
48f38aa4 | 1831 | fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; |
f20093ee | 1832 | |
43cf7631 | 1833 | perf_sample_data_init(data, 0, event->hw.last_period); |
2b0b5c6f | 1834 | |
f20093ee | 1835 | /* |
c8aab2e0 | 1836 | * Use latency for weight (only avail with PEBS-LL) |
f20093ee | 1837 | */ |
2abe681d | 1838 | if (fll && (sample_type & PERF_SAMPLE_WEIGHT_TYPE)) { |
2a6c6b7d | 1839 | data->weight.full = pebs->lat; |
2abe681d KL |
1840 | data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; |
1841 | } | |
c8aab2e0 SE |
1842 | |
1843 | /* | |
1844 | * data.data_src encodes the data source | |
1845 | */ | |
e16fd7f2 | 1846 | if (sample_type & PERF_SAMPLE_DATA_SRC) { |
48f38aa4 | 1847 | data->data_src.val = get_data_src(event, pebs->dse); |
e16fd7f2 KL |
1848 | data->sample_flags |= PERF_SAMPLE_DATA_SRC; |
1849 | } | |
f20093ee | 1850 | |
6cbc304f PZ |
1851 | /* |
1852 | * We must however always use iregs for the unwinder to stay sane; the | |
1853 | * record BP,SP,IP can point into thin air when the record is from a | |
a97673a1 | 1854 | * previous PMI context or an (I)RET happened between the record and |
6cbc304f PZ |
1855 | * PMI. |
1856 | */ | |
f226805b | 1857 | perf_sample_save_callchain(data, event, iregs); |
6cbc304f | 1858 | |
2b0b5c6f | 1859 | /* |
b8000586 PZ |
1860 | * We use the interrupt regs as a base because the PEBS record does not |
1861 | * contain a full regs set, specifically it seems to lack segment | |
1862 | * descriptors, which get used by things like user_mode(). | |
2b0b5c6f | 1863 | * |
b8000586 | 1864 | * In the simple case fix up only the IP for PERF_SAMPLE_IP. |
2b0b5c6f | 1865 | */ |
43cf7631 | 1866 | *regs = *iregs; |
d1e7e602 SE |
1867 | |
1868 | /* | |
1869 | * Initialize regs->flags from PEBS, |
1870 | * Clear exact bit (which uses x86 EFLAGS Reserved bit 3), | |
1871 | * i.e., do not rely on it being zero: | |
1872 | */ | |
1873 | regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT; | |
2b0b5c6f | 1874 | |
aea48559 | 1875 | if (sample_type & PERF_SAMPLE_REGS_INTR) { |
43cf7631 YZ |
1876 | regs->ax = pebs->ax; |
1877 | regs->bx = pebs->bx; | |
1878 | regs->cx = pebs->cx; | |
1879 | regs->dx = pebs->dx; | |
1880 | regs->si = pebs->si; | |
1881 | regs->di = pebs->di; | |
43cf7631 | 1882 | |
6cbc304f PZ |
1883 | regs->bp = pebs->bp; |
1884 | regs->sp = pebs->sp; | |
b8000586 | 1885 | |
aea48559 | 1886 | #ifndef CONFIG_X86_32 |
43cf7631 YZ |
1887 | regs->r8 = pebs->r8; |
1888 | regs->r9 = pebs->r9; | |
1889 | regs->r10 = pebs->r10; | |
1890 | regs->r11 = pebs->r11; | |
1891 | regs->r12 = pebs->r12; | |
1892 | regs->r13 = pebs->r13; | |
1893 | regs->r14 = pebs->r14; | |
1894 | regs->r15 = pebs->r15; | |
aea48559 SE |
1895 | #endif |
1896 | } | |
1897 | ||
71eb9ee9 | 1898 | if (event->attr.precise_ip > 1) { |
d1e7e602 SE |
1899 | /* |
1900 | * Haswell and later processors have an 'eventing IP' | |
1901 | * (real IP) which fixes the off-by-1 skid in hardware. | |
1902 | * Use it when precise_ip >= 2 : | |
1903 | */ | |
71eb9ee9 SE |
1904 | if (x86_pmu.intel_cap.pebs_format >= 2) { |
1905 | set_linear_ip(regs, pebs->real_ip); | |
1906 | regs->flags |= PERF_EFLAGS_EXACT; | |
1907 | } else { | |
d1e7e602 | 1908 | /* Otherwise, use PEBS off-by-1 IP: */ |
71eb9ee9 SE |
1909 | set_linear_ip(regs, pebs->ip); |
1910 | ||
d1e7e602 SE |
1911 | /* |
1912 | * With precise_ip >= 2, try to fix up the off-by-1 IP | |
1913 | * using the LBR. If successful, the fixup function | |
1914 | * corrects regs->ip and calls set_linear_ip() on regs: | |
1915 | */ | |
71eb9ee9 SE |
1916 | if (intel_pmu_pebs_fixup_ip(regs)) |
1917 | regs->flags |= PERF_EFLAGS_EXACT; | |
1918 | } | |
d1e7e602 SE |
1919 | } else { |
1920 | /* | |
1921 | * When precise_ip == 1, return the PEBS off-by-1 IP, | |
1922 | * no fixup attempted: | |
1923 | */ | |
71eb9ee9 | 1924 | set_linear_ip(regs, pebs->ip); |
d1e7e602 | 1925 | } |
71eb9ee9 | 1926 | |
2b0b5c6f | 1927 | |
76a5433f | 1928 | if ((sample_type & PERF_SAMPLE_ADDR_TYPE) && |
7b084630 | 1929 | x86_pmu.intel_cap.pebs_format >= 1) { |
43cf7631 | 1930 | data->addr = pebs->dla; |
7b084630 NK |
1931 | data->sample_flags |= PERF_SAMPLE_ADDR; |
1932 | } | |
f9134f36 | 1933 | |
a405bad5 AK |
1934 | if (x86_pmu.intel_cap.pebs_format >= 2) { |
1935 | /* Only set the TSX weight when no memory weight. */ | |
2abe681d | 1936 | if ((sample_type & PERF_SAMPLE_WEIGHT_TYPE) && !fll) { |
2a6c6b7d | 1937 | data->weight.full = intel_get_tsx_weight(pebs->tsx_tuning); |
2abe681d KL |
1938 | data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; |
1939 | } | |
ee9db0e1 | 1940 | if (sample_type & PERF_SAMPLE_TRANSACTION) { |
48f38aa4 AK |
1941 | data->txn = intel_get_tsx_transaction(pebs->tsx_tuning, |
1942 | pebs->ax); | |
ee9db0e1 KL |
1943 | data->sample_flags |= PERF_SAMPLE_TRANSACTION; |
1944 | } | |
a405bad5 | 1945 | } |
748e86aa | 1946 | |
2f7ebf2e AK |
1947 | /* |
1948 | * v3 supplies an accurate time stamp, so we use it |
1949 | * for the sample time. |
1950 | * | |
1951 | * We can only do this for the default trace clock. | |
1952 | */ | |
89e97eb8 KL |
1953 | if (x86_pmu.intel_cap.pebs_format >= 3) |
1954 | setup_pebs_time(event, data, pebs->tsc); | |
2f7ebf2e | 1955 | |
faac6f10 | 1956 | perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); |
43cf7631 YZ |
1957 | } |
1958 | ||
c22497f5 KL |
1959 | static void adaptive_pebs_save_regs(struct pt_regs *regs, |
1960 | struct pebs_gprs *gprs) | |
1961 | { | |
1962 | regs->ax = gprs->ax; | |
1963 | regs->bx = gprs->bx; | |
1964 | regs->cx = gprs->cx; | |
1965 | regs->dx = gprs->dx; | |
1966 | regs->si = gprs->si; | |
1967 | regs->di = gprs->di; | |
1968 | regs->bp = gprs->bp; | |
1969 | regs->sp = gprs->sp; | |
1970 | #ifndef CONFIG_X86_32 | |
1971 | regs->r8 = gprs->r8; | |
1972 | regs->r9 = gprs->r9; | |
1973 | regs->r10 = gprs->r10; | |
1974 | regs->r11 = gprs->r11; | |
1975 | regs->r12 = gprs->r12; | |
1976 | regs->r13 = gprs->r13; | |
1977 | regs->r14 = gprs->r14; | |
1978 | regs->r15 = gprs->r15; | |
1979 | #endif | |
1980 | } | |
1981 | ||
e02e9b03 KL |
1982 | static void intel_perf_event_update_pmc(struct perf_event *event, u64 pmc) |
1983 | { | |
1984 | int shift = 64 - x86_pmu.cntval_bits; | |
1985 | struct hw_perf_event *hwc; | |
1986 | u64 delta, prev_pmc; | |
1987 | ||
1988 | /* | |
1989 | * A recorded counter may not have an assigned event in the | |
1990 | * following cases. The value should be dropped. | |
1991 | * - An event is deleted. There is still an active PEBS event. | |
1992 | * The PEBS record doesn't shrink on pmu::del(). | |
1993 | * If the counter of the deleted event once occurred in a PEBS | |
1994 | * record, PEBS still records the counter until the counter is | |
1995 | * reassigned. | |
1996 | * - An event is stopped for some reason, e.g., throttled. | |
1997 | * During this period, another event is added and takes the | |
1998 | * counter of the stopped event. The stopped event is assigned | |
1999 | * to another new and uninitialized counter, since the | |
2000 | * x86_pmu_start(RELOAD) is not invoked for a stopped event. | |
2001 | * The PEBS_DATA_CFG is updated regardless of the event state. |
2002 | * The uninitialized counter can be recorded in a PEBS record. | |
2003 | * But the cpuc->events[uninitialized_counter] is always NULL, | |
2004 | * because the event is stopped. The uninitialized value is | |
2005 | * safely dropped. | |
2006 | */ | |
2007 | if (!event) | |
2008 | return; | |
2009 | ||
2010 | hwc = &event->hw; | |
2011 | prev_pmc = local64_read(&hwc->prev_count); | |
2012 | ||
2013 | /* Only update the count when the PMU is disabled */ | |
2014 | WARN_ON(this_cpu_read(cpu_hw_events.enabled)); | |
2015 | local64_set(&hwc->prev_count, pmc); | |
2016 | ||
2017 | delta = (pmc << shift) - (prev_pmc << shift); | |
2018 | delta >>= shift; | |
2019 | ||
2020 | local64_add(delta, &event->count); | |
2021 | local64_sub(delta, &hwc->period_left); | |
2022 | } | |
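/*
 * Editor's note: a worked example of the shift trick above, using
 * made-up counter values and assuming 48-bit counters (shift == 16).
 * If prev_pmc == 0xfffffffffff0 and the new pmc == 0x10, the counter
 * wrapped; (pmc << 16) - (prev_pmc << 16) evaluated in 64 bits and
 * shifted back down gives delta == 0x20, the number of increments
 * across the wrap, which is then added to event->count.
 */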
2023 | ||
2024 | static inline void __setup_pebs_counter_group(struct cpu_hw_events *cpuc, | |
2025 | struct perf_event *event, | |
2026 | struct pebs_cntr_header *cntr, | |
2027 | void *next_record) | |
2028 | { | |
2029 | int bit; | |
2030 | ||
2031 | for_each_set_bit(bit, (unsigned long *)&cntr->cntr, INTEL_PMC_MAX_GENERIC) { | |
2032 | intel_perf_event_update_pmc(cpuc->events[bit], *(u64 *)next_record); | |
2033 | next_record += sizeof(u64); | |
2034 | } | |
2035 | ||
2036 | for_each_set_bit(bit, (unsigned long *)&cntr->fixed, INTEL_PMC_MAX_FIXED) { | |
2037 | /* The slots event will be handled with perf_metric later */ | |
2038 | if ((cntr->metrics == INTEL_CNTR_METRICS) && | |
2039 | (bit + INTEL_PMC_IDX_FIXED == INTEL_PMC_IDX_FIXED_SLOTS)) { | |
2040 | next_record += sizeof(u64); | |
2041 | continue; | |
2042 | } | |
2043 | intel_perf_event_update_pmc(cpuc->events[bit + INTEL_PMC_IDX_FIXED], | |
2044 | *(u64 *)next_record); | |
2045 | next_record += sizeof(u64); | |
2046 | } | |
2047 | ||
2048 | /* HW will reload the value right after the overflow. */ | |
2049 | if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) | |
2050 | local64_set(&event->hw.prev_count, (u64)-event->hw.sample_period); | |
2051 | ||
2052 | if (cntr->metrics == INTEL_CNTR_METRICS) { | |
2053 | static_call(intel_pmu_update_topdown_event) | |
2054 | (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS], | |
2055 | (u64 *)next_record); | |
2056 | next_record += 2 * sizeof(u64); | |
2057 | } | |
2058 | } | |
2059 | ||
61b985e3 | 2060 | #define PEBS_LATENCY_MASK 0xffff |
61b985e3 | 2061 | |
c22497f5 KL |
2062 | /* |
2063 | * With adaptive PEBS the layout depends on what fields are configured. | |
2064 | */ | |
c22497f5 KL |
2065 | static void setup_pebs_adaptive_sample_data(struct perf_event *event, |
2066 | struct pt_regs *iregs, void *__pebs, | |
2067 | struct perf_sample_data *data, | |
2068 | struct pt_regs *regs) | |
2069 | { | |
2070 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | |
2071 | struct pebs_basic *basic = __pebs; | |
2072 | void *next_record = basic + 1; | |
7087bfb0 | 2073 | u64 sample_type, format_group; |
c22497f5 KL |
2074 | struct pebs_meminfo *meminfo = NULL; |
2075 | struct pebs_gprs *gprs = NULL; | |
2076 | struct x86_perf_regs *perf_regs; | |
2077 | ||
2078 | if (basic == NULL) | |
2079 | return; | |
2080 | ||
2081 | perf_regs = container_of(regs, struct x86_perf_regs, regs); | |
2082 | perf_regs->xmm_regs = NULL; | |
2083 | ||
2084 | sample_type = event->attr.sample_type; | |
7087bfb0 | 2085 | format_group = basic->format_group; |
c22497f5 | 2086 | perf_sample_data_init(data, 0, event->hw.last_period); |
c22497f5 | 2087 | |
89e97eb8 | 2088 | setup_pebs_time(event, data, basic->tsc); |
c22497f5 KL |
2089 | |
2090 | /* | |
2091 | * We must however always use iregs for the unwinder to stay sane; the | |
2092 | * record BP,SP,IP can point into thin air when the record is from a | |
2093 | * previous PMI context or an (I)RET happened between the record and | |
2094 | * PMI. | |
2095 | */ | |
f226805b | 2096 | perf_sample_save_callchain(data, event, iregs); |
c22497f5 KL |
2097 | |
2098 | *regs = *iregs; | |
2099 | /* The ip in basic is EventingIP */ | |
2100 | set_linear_ip(regs, basic->ip); | |
2101 | regs->flags = PERF_EFLAGS_EXACT; | |
2102 | ||
e5f32ad5 KL |
2103 | if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) { |
2104 | if (x86_pmu.flags & PMU_FL_RETIRE_LATENCY) | |
7087bfb0 | 2105 | data->weight.var3_w = basic->retire_latency; |
e5f32ad5 KL |
2106 | else |
2107 | data->weight.var3_w = 0; | |
2108 | } | |
c87a3109 | 2109 | |
c22497f5 KL |
2110 | /* |
2111 | * The MEMINFO record comes before the GP record, |
2112 | * but PERF_SAMPLE_TRANSACTION needs gprs->ax. |
2113 | * Save the pointer here and process it later. |
2114 | */ | |
7087bfb0 | 2115 | if (format_group & PEBS_DATACFG_MEMINFO) { |
c22497f5 KL |
2116 | meminfo = next_record; |
2117 | next_record = meminfo + 1; | |
2118 | } | |
2119 | ||
7087bfb0 | 2120 | if (format_group & PEBS_DATACFG_GP) { |
c22497f5 KL |
2121 | gprs = next_record; |
2122 | next_record = gprs + 1; | |
2123 | ||
2124 | if (event->attr.precise_ip < 2) { | |
2125 | set_linear_ip(regs, gprs->ip); | |
2126 | regs->flags &= ~PERF_EFLAGS_EXACT; | |
2127 | } | |
2128 | ||
71dcc11c | 2129 | if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) |
c22497f5 KL |
2130 | adaptive_pebs_save_regs(regs, gprs); |
2131 | } | |
2132 | ||
7087bfb0 | 2133 | if (format_group & PEBS_DATACFG_MEMINFO) { |
61b985e3 | 2134 | if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) { |
7087bfb0 KL |
2135 | u64 latency = x86_pmu.flags & PMU_FL_INSTR_LATENCY ? |
2136 | meminfo->cache_latency : meminfo->mem_latency; | |
61b985e3 | 2137 | |
7087bfb0 KL |
2138 | if (x86_pmu.flags & PMU_FL_INSTR_LATENCY) |
2139 | data->weight.var2_w = meminfo->instr_latency; | |
61b985e3 KL |
2140 | |
2141 | /* | |
2142 | * Although meminfo::latency is defined as a u64, | |
2143 | * only the lower 32 bits include the valid data | |
2144 | * in practice on Ice Lake and earlier platforms. | |
2145 | */ | |
2146 | if (sample_type & PERF_SAMPLE_WEIGHT) { | |
7087bfb0 | 2147 | data->weight.full = latency ?: |
61b985e3 KL |
2148 | intel_get_tsx_weight(meminfo->tsx_tuning); |
2149 | } else { | |
7087bfb0 | 2150 | data->weight.var1_dw = (u32)latency ?: |
61b985e3 KL |
2151 | intel_get_tsx_weight(meminfo->tsx_tuning); |
2152 | } | |
7087bfb0 | 2153 | |
2abe681d | 2154 | data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; |
61b985e3 | 2155 | } |
c22497f5 | 2156 | |
e16fd7f2 | 2157 | if (sample_type & PERF_SAMPLE_DATA_SRC) { |
c22497f5 | 2158 | data->data_src.val = get_data_src(event, meminfo->aux); |
e16fd7f2 KL |
2159 | data->sample_flags |= PERF_SAMPLE_DATA_SRC; |
2160 | } | |
c22497f5 | 2161 | |
7b084630 | 2162 | if (sample_type & PERF_SAMPLE_ADDR_TYPE) { |
c22497f5 | 2163 | data->addr = meminfo->address; |
7b084630 NK |
2164 | data->sample_flags |= PERF_SAMPLE_ADDR; |
2165 | } | |
c22497f5 | 2166 | |
ee9db0e1 | 2167 | if (sample_type & PERF_SAMPLE_TRANSACTION) { |
c22497f5 KL |
2168 | data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning, |
2169 | gprs ? gprs->ax : 0); | |
ee9db0e1 KL |
2170 | data->sample_flags |= PERF_SAMPLE_TRANSACTION; |
2171 | } | |
c22497f5 KL |
2172 | } |
2173 | ||
7087bfb0 | 2174 | if (format_group & PEBS_DATACFG_XMMS) { |
c22497f5 KL |
2175 | struct pebs_xmm *xmm = next_record; |
2176 | ||
2177 | next_record = xmm + 1; | |
2178 | perf_regs->xmm_regs = xmm->xmm; | |
2179 | } | |
2180 | ||
7087bfb0 | 2181 | if (format_group & PEBS_DATACFG_LBRS) { |
5624986d | 2182 | struct lbr_entry *lbr = next_record; |
7087bfb0 | 2183 | int num_lbr = ((format_group >> PEBS_DATACFG_LBR_SHIFT) |
c22497f5 | 2184 | & 0xff) + 1; |
5624986d | 2185 | next_record = next_record + num_lbr * sizeof(struct lbr_entry); |
c22497f5 KL |
2186 | |
2187 | if (has_branch_stack(event)) { | |
2188 | intel_pmu_store_pebs_lbrs(lbr); | |
33744916 | 2189 | intel_pmu_lbr_save_brstack(data, cpuc, event); |
c22497f5 KL |
2190 | } |
2191 | } | |
2192 | ||
e02e9b03 KL |
2193 | if (format_group & (PEBS_DATACFG_CNTR | PEBS_DATACFG_METRICS)) { |
2194 | struct pebs_cntr_header *cntr = next_record; | |
2195 | unsigned int nr; | |
2196 | ||
2197 | next_record += sizeof(struct pebs_cntr_header); | |
2198 | /* | |
2199 | * The PEBS_DATA_CFG is a global register, which is the | |
2200 | * superset configuration for all PEBS events. | |
2201 | * For the PEBS record of a non-sample-read group, ignore |
2202 | * the counter snapshot fields. | |
2203 | */ | |
2204 | if (is_pebs_counter_event_group(event)) { | |
2205 | __setup_pebs_counter_group(cpuc, event, cntr, next_record); | |
2206 | data->sample_flags |= PERF_SAMPLE_READ; | |
2207 | } | |
2208 | ||
2209 | nr = hweight32(cntr->cntr) + hweight32(cntr->fixed); | |
2210 | if (cntr->metrics == INTEL_CNTR_METRICS) | |
2211 | nr += 2; | |
2212 | next_record += nr * sizeof(u64); | |
2213 | } | |
2214 | ||
7087bfb0 KL |
2215 | WARN_ONCE(next_record != __pebs + basic->format_size, |
2216 | "PEBS record size %u, expected %llu, config %llx\n", | |
2217 | basic->format_size, | |
c22497f5 | 2218 | (u64)(next_record - __pebs), |
7087bfb0 | 2219 | format_group); |
c22497f5 KL |
2220 | } |
2221 | ||
21509084 YZ |
2222 | static inline void * |
2223 | get_next_pebs_record_by_bit(void *base, void *top, int bit) | |
2224 | { | |
2225 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | |
2226 | void *at; | |
2227 | u64 pebs_status; | |
2228 | ||
1424a09a SE |
2229 | /* |
2230 | * fmt0 does not have a status bitfield (does not use | |
2231 | * pebs_record_nhm format) |
2232 | */ | |
2233 | if (x86_pmu.intel_cap.pebs_format < 1) | |
2234 | return base; | |
2235 | ||
21509084 YZ |
2236 | if (base == NULL) |
2237 | return NULL; | |
2238 | ||
c22497f5 KL |
2239 | for (at = base; at < top; at += cpuc->pebs_record_size) { |
2240 | unsigned long status = get_pebs_status(at); | |
21509084 | 2241 | |
c22497f5 | 2242 | if (test_bit(bit, (unsigned long *)&status)) { |
a3d86542 PZ |
2243 | /* PEBS v3 has accurate status bits */ |
2244 | if (x86_pmu.intel_cap.pebs_format >= 3) | |
2245 | return at; | |
21509084 | 2246 | |
c22497f5 | 2247 | if (status == (1 << bit)) |
21509084 YZ |
2248 | return at; |
2249 | ||
2250 | /* clear non-PEBS bit and re-check */ | |
c22497f5 | 2251 | pebs_status = status & cpuc->pebs_enabled; |
fd583ad1 | 2252 | pebs_status &= PEBS_COUNTER_MASK; |
21509084 YZ |
2253 | if (pebs_status == (1 << bit)) |
2254 | return at; | |
2255 | } | |
2256 | } | |
2257 | return NULL; | |
2258 | } | |
2259 | ||
d31fc13f KL |
2260 | /* |
2261 | * Special variant of intel_pmu_save_and_restart() for auto-reload. | |
2262 | */ | |
2263 | static int | |
2264 | intel_pmu_save_and_restart_reload(struct perf_event *event, int count) | |
2265 | { | |
2266 | struct hw_perf_event *hwc = &event->hw; | |
2267 | int shift = 64 - x86_pmu.cntval_bits; | |
2268 | u64 period = hwc->sample_period; | |
2269 | u64 prev_raw_count, new_raw_count; | |
2270 | s64 new, old; | |
2271 | ||
2272 | WARN_ON(!period); | |
2273 | ||
2274 | /* | |
2275 | * drain_pebs() only happens when the PMU is disabled. | |
2276 | */ | |
2277 | WARN_ON(this_cpu_read(cpu_hw_events.enabled)); | |
2278 | ||
2279 | prev_raw_count = local64_read(&hwc->prev_count); | |
795ada52 | 2280 | new_raw_count = rdpmc(hwc->event_base_rdpmc); |
d31fc13f KL |
2281 | local64_set(&hwc->prev_count, new_raw_count); |
2282 | ||
2283 | /* | |
2284 | * Since the counter increments a negative counter value and | |
2285 | * overflows on the sign switch, giving the interval: | |
2286 | * | |
2287 | * [-period, 0] | |
2288 | * | |
d9f6e12f | 2289 | * the difference between two consecutive reads is: |
d31fc13f KL |
2290 | * |
2291 | * A) value2 - value1; | |
2292 | * when no overflows have happened in between, | |
2293 | * | |
2294 | * B) (0 - value1) + (value2 - (-period)); | |
2295 | * when one overflow happened in between, | |
2296 | * | |
2297 | * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period)); | |
2298 | * when @n overflows happened in between. | |
2299 | * | |
2300 | * Here A) is the obvious difference, B) is the extension to the | |
2301 | * discrete interval, where the first term is to the top of the | |
2302 | * interval and the second term is from the bottom of the next | |
2303 | * interval and C) the extension to multiple intervals, where the | |
2304 | * middle term is the whole intervals covered. | |
2305 | * | |
2306 | * An equivalent of C, by reduction, is: | |
2307 | * | |
2308 | * value2 - value1 + n * period | |
2309 | */ | |
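	/*
	 * Editor's note: a worked example with made-up numbers. Take
	 * period = 100, an old value of -30 (70 events into the old
	 * interval), one reload in between (count = 1) and a new value
	 * of -80 (20 events into the new interval):
	 *
	 *   new - old + count * period = -80 - (-30) + 1 * 100 = 50
	 *
	 * i.e. the 30 events left in the old interval plus the 20
	 * accumulated in the new one.
	 */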
2310 | new = ((s64)(new_raw_count << shift) >> shift); | |
2311 | old = ((s64)(prev_raw_count << shift) >> shift); | |
2312 | local64_add(new - old + count * period, &event->count); | |
2313 | ||
f861854e KL |
2314 | local64_set(&hwc->period_left, -new); |
2315 | ||
d31fc13f KL |
2316 | perf_event_update_userpage(event); |
2317 | ||
2318 | return 0; | |
2319 | } | |
2320 | ||
3c00ed34 KL |
2321 | typedef void (*setup_fn)(struct perf_event *, struct pt_regs *, void *, |
2322 | struct perf_sample_data *, struct pt_regs *); | |
2323 | ||
2324 | static struct pt_regs dummy_iregs; | |
2325 | ||
9dfa9a5c PZ |
2326 | static __always_inline void |
2327 | __intel_pmu_pebs_event(struct perf_event *event, | |
2328 | struct pt_regs *iregs, | |
3c00ed34 | 2329 | struct pt_regs *regs, |
9dfa9a5c | 2330 | struct perf_sample_data *data, |
3c00ed34 KL |
2331 | void *at, |
2332 | setup_fn setup_sample) | |
43cf7631 | 2333 | { |
3c00ed34 KL |
2334 | setup_sample(event, iregs, at, data, regs); |
2335 | perf_event_output(event, data, regs); | |
2336 | } | |
35d1ce6b | 2337 | |
3c00ed34 KL |
2338 | static __always_inline void |
2339 | __intel_pmu_pebs_last_event(struct perf_event *event, | |
2340 | struct pt_regs *iregs, | |
2341 | struct pt_regs *regs, | |
2342 | struct perf_sample_data *data, | |
2343 | void *at, | |
2344 | int count, | |
2345 | setup_fn setup_sample) | |
2346 | { | |
2347 | struct hw_perf_event *hwc = &event->hw; | |
21509084 | 2348 | |
9dfa9a5c | 2349 | setup_sample(event, iregs, at, data, regs); |
35d1ce6b KL |
2350 | if (iregs == &dummy_iregs) { |
2351 | /* | |
2352 | * The PEBS records may be drained in the non-overflow context, | |
2353 | * e.g., large PEBS + context switch. Perf should treat the | |
2354 | * last record the same as the other PEBS records and not |
2355 | * invoke the generic overflow handler. |
2356 | */ | |
9dfa9a5c | 2357 | perf_event_output(event, data, regs); |
35d1ce6b KL |
2358 | } else { |
2359 | /* | |
2360 | * All but the last record have already been processed. |
2361 | * The last one is left so that the overflow handler can be invoked. |
2362 | */ | |
b8328f67 | 2363 | perf_event_overflow(event, data, regs); |
21509084 | 2364 | } |
3c00ed34 KL |
2365 | |
2366 | if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { | |
e02e9b03 KL |
2367 | if ((is_pebs_counter_event_group(event))) { |
2368 | /* | |
2369 | * The value of each sample has already been updated when |
2370 | * setting up the corresponding sample data. |
2371 | */ | |
2372 | perf_event_update_userpage(event); | |
2373 | } else { | |
2374 | /* | |
2375 | * Currently, auto-reload is only enabled in fixed period mode. |
2376 | * The reload value is always hwc->sample_period. |
2377 | * This may need to change if auto-reload is later enabled in |
2378 | * freq mode. |
2379 | */ | |
2380 | intel_pmu_save_and_restart_reload(event, count); | |
2381 | } | |
7da9960b KL |
2382 | } else { |
2383 | /* | |
2384 | * For a non-precise event, it's possible that |
2385 | * counters-snapshotting records a positive value for the |
2386 | * overflowed event. The HW auto-reload mechanism then |
2387 | * resets the counter to 0 immediately, because the |
2388 | * pebs_event_reset is cleared if PERF_X86_EVENT_AUTO_RELOAD |
2389 | * is not set. A counter running backwards may be observed in a |
2390 | * PMI handler. |
2391 | * |
2392 | * Since the event value has already been updated when processing |
2393 | * the counters-snapshotting record, only the new period needs to |
2394 | * be set for the counter. |
2395 | */ | |
2396 | if (is_pebs_counter_event_group(event)) | |
2397 | static_call(x86_pmu_set_period)(event); | |
2398 | else | |
2399 | intel_pmu_save_and_restart(event); | |
2400 | } | |
3c00ed34 KL |
2401 | } |
2402 | ||
2403 | static __always_inline void | |
2404 | __intel_pmu_pebs_events(struct perf_event *event, | |
2405 | struct pt_regs *iregs, | |
2406 | struct perf_sample_data *data, | |
2407 | void *base, void *top, | |
2408 | int bit, int count, | |
2409 | setup_fn setup_sample) | |
2410 | { | |
2411 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | |
2412 | struct x86_perf_regs perf_regs; | |
2413 | struct pt_regs *regs = &perf_regs.regs; | |
2414 | void *at = get_next_pebs_record_by_bit(base, top, bit); | |
2415 | int cnt = count; | |
2416 | ||
2417 | if (!iregs) | |
2418 | iregs = &dummy_iregs; | |
2419 | ||
2420 | while (cnt > 1) { | |
2421 | __intel_pmu_pebs_event(event, iregs, regs, data, at, setup_sample); | |
2422 | at += cpuc->pebs_record_size; | |
2423 | at = get_next_pebs_record_by_bit(at, top, bit); | |
2424 | cnt--; | |
2425 | } | |
2426 | ||
2427 | __intel_pmu_pebs_last_event(event, iregs, regs, data, at, count, setup_sample); | |
2b0b5c6f PZ |
2428 | } |
2429 | ||
9dfa9a5c | 2430 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data) |
ca037701 | 2431 | { |
89cbc767 | 2432 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
ca037701 PZ |
2433 | struct debug_store *ds = cpuc->ds; |
2434 | struct perf_event *event = cpuc->events[0]; /* PMC0 only */ | |
2435 | struct pebs_record_core *at, *top; | |
ca037701 PZ |
2436 | int n; |
2437 | ||
6809b6ea | 2438 | if (!x86_pmu.pebs_active) |
ca037701 PZ |
2439 | return; |
2440 | ||
ca037701 PZ |
2441 | at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base; |
2442 | top = (struct pebs_record_core *)(unsigned long)ds->pebs_index; | |
2443 | ||
d80c7502 PZ |
2444 | /* |
2445 | * Whatever else happens, drain the thing | |
2446 | */ | |
2447 | ds->pebs_index = ds->pebs_buffer_base; | |
2448 | ||
2449 | if (!test_bit(0, cpuc->active_mask)) | |
8f4aebd2 | 2450 | return; |
ca037701 | 2451 | |
d80c7502 PZ |
2452 | WARN_ON_ONCE(!event); |
2453 | ||
ab608344 | 2454 | if (!event->attr.precise_ip) |
d80c7502 PZ |
2455 | return; |
2456 | ||
1424a09a | 2457 | n = top - at; |
d31fc13f KL |
2458 | if (n <= 0) { |
2459 | if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) | |
2460 | intel_pmu_save_and_restart_reload(event, 0); | |
d80c7502 | 2461 | return; |
d31fc13f | 2462 | } |
ca037701 | 2463 | |
3c00ed34 KL |
2464 | __intel_pmu_pebs_events(event, iregs, data, at, top, 0, n, |
2465 | setup_pebs_fixed_sample_data); | |
ca037701 PZ |
2466 | } |
2467 | ||
99bcd91f | 2468 | static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask) |
477f00f9 | 2469 | { |
99bcd91f | 2470 | u64 pebs_enabled = cpuc->pebs_enabled & mask; |
477f00f9 KL |
2471 | struct perf_event *event; |
2472 | int bit; | |
2473 | ||
2474 | /* | |
2475 | * drain_pebs() could be called twice in a short period |
2476 | * for an auto-reload event in pmu::read(). No overflows |
2477 | * have happened in between. |
2478 | * intel_pmu_save_and_restart_reload() still needs to be |
2479 | * called to update the event->count for this case. |
2480 | */ | |
99bcd91f | 2481 | for_each_set_bit(bit, (unsigned long *)&pebs_enabled, X86_PMC_IDX_MAX) { |
477f00f9 KL |
2482 | event = cpuc->events[bit]; |
2483 | if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) | |
2484 | intel_pmu_save_and_restart_reload(event, 0); | |
2485 | } | |
2486 | } | |
2487 | ||
9dfa9a5c | 2488 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data) |
ca037701 | 2489 | { |
89cbc767 | 2490 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
ca037701 | 2491 | struct debug_store *ds = cpuc->ds; |
21509084 YZ |
2492 | struct perf_event *event; |
2493 | void *base, *at, *top; | |
ec71a398 KL |
2494 | short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; |
2495 | short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; | |
a23eb2fc | 2496 | int max_pebs_events = intel_pmu_max_num_pebs(NULL); |
ec71a398 KL |
2497 | int bit, i, size; |
2498 | u64 mask; | |
d2beea4a PZ |
2499 | |
2500 | if (!x86_pmu.pebs_active) | |
2501 | return; | |
2502 | ||
21509084 | 2503 | base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; |
d2beea4a | 2504 | top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; |
ca037701 | 2505 | |
ca037701 PZ |
2506 | ds->pebs_index = ds->pebs_buffer_base; |
2507 | ||
a23eb2fc KL |
2508 | mask = x86_pmu.pebs_events_mask; |
2509 | size = max_pebs_events; | |
ec71a398 | 2510 | if (x86_pmu.flags & PMU_FL_PEBS_ALL) { |
722e42e4 KL |
2511 | mask |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED; |
2512 | size = INTEL_PMC_IDX_FIXED + x86_pmu_max_num_counters_fixed(NULL); | |
ec71a398 KL |
2513 | } |
2514 | ||
d31fc13f | 2515 | if (unlikely(base >= top)) { |
99bcd91f | 2516 | intel_pmu_pebs_event_update_no_drain(cpuc, mask); |
d2beea4a | 2517 | return; |
d31fc13f | 2518 | } |
d2beea4a | 2519 | |
21509084 | 2520 | for (at = base; at < top; at += x86_pmu.pebs_record_size) { |
130768b8 | 2521 | struct pebs_record_nhm *p = at; |
75f80859 | 2522 | u64 pebs_status; |
ca037701 | 2523 | |
8ef9b845 | 2524 | pebs_status = p->status & cpuc->pebs_enabled; |
ec71a398 | 2525 | pebs_status &= mask; |
8ef9b845 PZ |
2526 | |
2527 | /* PEBS v3 has more accurate status bits */ | |
a3d86542 | 2528 | if (x86_pmu.intel_cap.pebs_format >= 3) { |
c22497f5 | 2529 | for_each_set_bit(bit, (unsigned long *)&pebs_status, size) |
a3d86542 PZ |
2530 | counts[bit]++; |
2531 | ||
2532 | continue; | |
2533 | } | |
2534 | ||
01330d72 AK |
2535 | /* |
2536 | * On some CPUs the PEBS status can be zero when PEBS is | |
2537 | * racing with clearing of GLOBAL_STATUS. | |
2538 | * | |
2539 | * Normally we would drop that record, but in the | |
2540 | * case when there is only a single active PEBS event | |
2541 | * we can assume it's for that event. | |
2542 | */ | |
2543 | if (!pebs_status && cpuc->pebs_enabled && | |
2544 | !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1))) | |
d88d05a9 | 2545 | pebs_status = p->status = cpuc->pebs_enabled; |
01330d72 | 2546 | |
75f80859 | 2547 | bit = find_first_bit((unsigned long *)&pebs_status, |
a23eb2fc KL |
2548 | max_pebs_events); |
2549 | ||
2550 | if (!(x86_pmu.pebs_events_mask & (1 << bit))) | |
21509084 | 2551 | continue; |
75f80859 | 2552 | |
21509084 YZ |
2553 | /* |
2554 | * The PEBS hardware does not deal well with the situation |
2555 | * when events happen close to each other and multiple bits |
2556 | * are set. However, this should happen rarely. |
2557 | * |
2558 | * If these events include one PEBS and multiple non-PEBS |
2559 | * events, the PEBS record is not affected. The record will |
2560 | * be handled normally. (slow path) |
2561 | * |
2562 | * If these events include two or more PEBS events, the |
2563 | * records for the events can be collapsed into a single |
2564 | * one, and it's not possible to reconstruct all events |
2565 | * that caused the PEBS record. This is called a collision. |
2566 | * If a collision happened, the record is dropped. |
21509084 | 2567 | */ |
fc17db8a | 2568 | if (pebs_status != (1ULL << bit)) { |
c22497f5 | 2569 | for_each_set_bit(i, (unsigned long *)&pebs_status, size) |
75f80859 PZ |
2570 | error[i]++; |
2571 | continue; | |
ca037701 | 2572 | } |
75f80859 | 2573 | |
21509084 YZ |
2574 | counts[bit]++; |
2575 | } | |
ca037701 | 2576 | |
c22497f5 | 2577 | for_each_set_bit(bit, (unsigned long *)&mask, size) { |
f38b0dbb | 2578 | if ((counts[bit] == 0) && (error[bit] == 0)) |
ca037701 | 2579 | continue; |
75f80859 | 2580 | |
21509084 | 2581 | event = cpuc->events[bit]; |
8ef9b845 PZ |
2582 | if (WARN_ON_ONCE(!event)) |
2583 | continue; | |
2584 | ||
2585 | if (WARN_ON_ONCE(!event->attr.precise_ip)) | |
2586 | continue; | |
ca037701 | 2587 | |
f38b0dbb | 2588 | /* log dropped samples number */ |
475113d9 | 2589 | if (error[bit]) { |
f38b0dbb KL |
2590 | perf_log_lost_samples(event, error[bit]); |
2591 | ||
b8328f67 KL |
2592 | if (iregs) |
2593 | perf_event_account_interrupt(event); | |
475113d9 JO |
2594 | } |
2595 | ||
f38b0dbb | 2596 | if (counts[bit]) { |
3c00ed34 KL |
2597 | __intel_pmu_pebs_events(event, iregs, data, base, |
2598 | top, bit, counts[bit], | |
2599 | setup_pebs_fixed_sample_data); | |
f38b0dbb | 2600 | } |
ca037701 | 2601 | } |
ca037701 PZ |
2602 | } |
2603 | ||
9dfa9a5c | 2604 | static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data) |
c22497f5 KL |
2605 | { |
2606 | short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; | |
ae55e308 | 2607 | void *last[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS]; |
c22497f5 KL |
2608 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
2609 | struct debug_store *ds = cpuc->ds; | |
ae55e308 KL |
2610 | struct x86_perf_regs perf_regs; |
2611 | struct pt_regs *regs = &perf_regs.regs; | |
2612 | struct pebs_basic *basic; | |
c22497f5 KL |
2613 | struct perf_event *event; |
2614 | void *base, *at, *top; | |
722e42e4 | 2615 | int bit; |
c22497f5 KL |
2616 | u64 mask; |
2617 | ||
2618 | if (!x86_pmu.pebs_active) | |
2619 | return; | |
2620 | ||
2621 | base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base; | |
2622 | top = (struct pebs_basic *)(unsigned long)ds->pebs_index; | |
2623 | ||
2624 | ds->pebs_index = ds->pebs_buffer_base; | |
2625 | ||
a23eb2fc | 2626 | mask = hybrid(cpuc->pmu, pebs_events_mask) | |
722e42e4 | 2627 | (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED); |
c22497f5 KL |
2628 | |
2629 | if (unlikely(base >= top)) { | |
99bcd91f | 2630 | intel_pmu_pebs_event_update_no_drain(cpuc, mask); |
c22497f5 KL |
2631 | return; |
2632 | } | |
2633 | ||
ae55e308 KL |
2634 | if (!iregs) |
2635 | iregs = &dummy_iregs; | |
2636 | ||
2637 | /* Process all but the last event for each counter. */ | |
2638 | for (at = base; at < top; at += basic->format_size) { | |
c22497f5 KL |
2639 | u64 pebs_status; |
2640 | ||
ae55e308 KL |
2641 | basic = at; |
2642 | if (basic->format_size != cpuc->pebs_record_size) | |
2643 | continue; | |
c22497f5 | 2644 | |
ae55e308 KL |
2645 | pebs_status = basic->applicable_counters & cpuc->pebs_enabled & mask; |
2646 | for_each_set_bit(bit, (unsigned long *)&pebs_status, X86_PMC_IDX_MAX) { | |
2647 | event = cpuc->events[bit]; | |
c22497f5 | 2648 | |
ae55e308 KL |
2649 | if (WARN_ON_ONCE(!event) || |
2650 | WARN_ON_ONCE(!event->attr.precise_ip)) | |
2651 | continue; | |
2652 | ||
2653 | if (counts[bit]++) { | |
2654 | __intel_pmu_pebs_event(event, iregs, regs, data, last[bit], | |
2655 | setup_pebs_adaptive_sample_data); | |
2656 | } | |
2657 | last[bit] = at; | |
2658 | } | |
c22497f5 KL |
2659 | } |
2660 | ||
722e42e4 | 2661 | for_each_set_bit(bit, (unsigned long *)&mask, X86_PMC_IDX_MAX) { |
ae55e308 | 2662 | if (!counts[bit]) |
c22497f5 KL |
2663 | continue; |
2664 | ||
2665 | event = cpuc->events[bit]; | |
c22497f5 | 2666 | |
ae55e308 KL |
2667 | __intel_pmu_pebs_last_event(event, iregs, regs, data, last[bit], |
2668 | counts[bit], setup_pebs_adaptive_sample_data); | |
c22497f5 KL |
2669 | } |
2670 | } | |
2671 | ||
ca037701 | 2672 | /* |
d971342d | 2673 | * PEBS probe and setup |
ca037701 PZ |
2674 | */ |
2675 | ||
d971342d | 2676 | void __init intel_pebs_init(void) |
ca037701 PZ |
2677 | { |
2678 | /* | |
2679 | * No support for 32bit formats | |
2680 | */ | |
2681 | if (!boot_cpu_has(X86_FEATURE_DTES64)) | |
2682 | return; | |
2683 | ||
acb727e0 | 2684 | x86_pmu.ds_pebs = boot_cpu_has(X86_FEATURE_PEBS); |
e72daf3f | 2685 | x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; |
cd6b984f | 2686 | if (x86_pmu.version <= 4) |
9b545c04 | 2687 | x86_pmu.pebs_no_isolation = 1; |
cd6b984f | 2688 | |
acb727e0 | 2689 | if (x86_pmu.ds_pebs) { |
8db909a7 | 2690 | char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; |
c22497f5 | 2691 | char *pebs_qual = ""; |
8db909a7 | 2692 | int format = x86_pmu.intel_cap.pebs_format; |
ca037701 | 2693 | |
c22497f5 KL |
2694 | if (format < 4) |
2695 | x86_pmu.intel_cap.pebs_baseline = 0; | |
2696 | ||
4a3fd130 DM |
2697 | x86_pmu.pebs_enable = intel_pmu_pebs_enable; |
2698 | x86_pmu.pebs_disable = intel_pmu_pebs_disable; | |
2699 | x86_pmu.pebs_enable_all = intel_pmu_pebs_enable_all; | |
2700 | x86_pmu.pebs_disable_all = intel_pmu_pebs_disable_all; | |
2701 | ||
ca037701 PZ |
2702 | switch (format) { |
2703 | case 0: | |
1b74dde7 | 2704 | pr_cont("PEBS fmt0%c, ", pebs_type); |
ca037701 | 2705 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); |
e72daf3f JO |
2706 | /* |
2707 | * Using >PAGE_SIZE buffers makes the WRMSR to | |
2708 | * PERF_GLOBAL_CTRL in intel_pmu_enable_all() | |
2709 | * mysteriously hang on Core2. | |
2710 | * | |
2711 | * As a workaround, we don't do this. | |
2712 | */ | |
2713 | x86_pmu.pebs_buffer_size = PAGE_SIZE; | |
ca037701 | 2714 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; |
ca037701 PZ |
2715 | break; |
2716 | ||
2717 | case 1: | |
1b74dde7 | 2718 | pr_cont("PEBS fmt1%c, ", pebs_type); |
ca037701 PZ |
2719 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); |
2720 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; | |
ca037701 PZ |
2721 | break; |
2722 | ||
130768b8 AK |
2723 | case 2: |
2724 | pr_cont("PEBS fmt2%c, ", pebs_type); | |
2725 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw); | |
d2beea4a | 2726 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; |
130768b8 AK |
2727 | break; |
2728 | ||
2f7ebf2e AK |
2729 | case 3: |
2730 | pr_cont("PEBS fmt3%c, ", pebs_type); | |
2731 | x86_pmu.pebs_record_size = | |
2732 | sizeof(struct pebs_record_skl); | |
2733 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; | |
174afc3e | 2734 | x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME; |
2f7ebf2e AK |
2735 | break; |
2736 | ||
b8c3a250 | 2737 | case 6: |
e02e9b03 KL |
2738 | if (x86_pmu.intel_cap.pebs_baseline) { |
2739 | x86_pmu.large_pebs_flags |= PERF_SAMPLE_READ; | |
2740 | x86_pmu.late_setup = intel_pmu_late_setup; | |
2741 | } | |
2742 | fallthrough; | |
2145e77f | 2743 | case 5: |
13738a36 LX |
2744 | x86_pmu.pebs_ept = 1; |
2745 | fallthrough; | |
2746 | case 4: | |
c22497f5 KL |
2747 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl; |
2748 | x86_pmu.pebs_record_size = sizeof(struct pebs_basic); | |
2749 | if (x86_pmu.intel_cap.pebs_baseline) { | |
2750 | x86_pmu.large_pebs_flags |= | |
2751 | PERF_SAMPLE_BRANCH_STACK | | |
2752 | PERF_SAMPLE_TIME; | |
2753 | x86_pmu.flags |= PMU_FL_PEBS_ALL; | |
7d359886 | 2754 | x86_pmu.pebs_capable = ~0ULL; |
c22497f5 | 2755 | pebs_qual = "-baseline"; |
61e76d53 | 2756 | x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; |
c22497f5 KL |
2757 | } else { |
2758 | /* Only basic record supported */ | |
c22497f5 KL |
2759 | x86_pmu.large_pebs_flags &= |
2760 | ~(PERF_SAMPLE_ADDR | | |
2761 | PERF_SAMPLE_TIME | | |
2762 | PERF_SAMPLE_DATA_SRC | | |
2763 | PERF_SAMPLE_TRANSACTION | | |
2764 | PERF_SAMPLE_REGS_USER | | |
2765 | PERF_SAMPLE_REGS_INTR); | |
2766 | } | |
e02e9b03 | 2767 | pr_cont("PEBS fmt%d%c%s, ", format, pebs_type, pebs_qual); |
42880f72 | 2768 | |
0a556150 KL |
2769 | /* |
2770 | * The PEBS-via-PT is not supported on hybrid platforms, | |
2771 | * because not all CPUs of a hybrid machine support it. | |
2772 | * The global x86_pmu.intel_cap, which only contains the | |
2773 | * common capabilities, is used to check the availability | |
2774 | * of the feature. The per-PMU pebs_output_pt_available | |
2775 | * in a hybrid machine should be ignored. | |
2776 | */ | |
2777 | if (x86_pmu.intel_cap.pebs_output_pt_available) { | |
42880f72 | 2778 | pr_cont("PEBS-via-PT, "); |
61e76d53 | 2779 | x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT; |
42880f72 AS |
2780 | } |
2781 | ||
c22497f5 KL |
2782 | break; |
2783 | ||
ca037701 | 2784 | default: |
1b74dde7 | 2785 | pr_cont("no PEBS fmt%d%c, ", format, pebs_type); |
acb727e0 | 2786 | x86_pmu.ds_pebs = 0; |
ca037701 PZ |
2787 | } |
2788 | } | |
2789 | } | |
1d9d8639 SE |
2790 | |
2791 | void perf_restore_debug_store(void) | |
2792 | { | |
2a6e06b2 LT |
2793 | struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); |
2794 | ||
acb727e0 | 2795 | if (!x86_pmu.bts && !x86_pmu.ds_pebs) |
1d9d8639 SE |
2796 | return; |
2797 | ||
78255eb2 | 2798 | wrmsrq(MSR_IA32_DS_AREA, (unsigned long)ds); |
1d9d8639 | 2799 | } |