Commit | Line | Data |
---|---|---|
e9991434 AP |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * RISC-V performance counter support. | |
4 | * | |
5 | * Copyright (C) 2021 Western Digital Corporation or its affiliates. | |
6 | * | |
7 | * This code is based on ARM perf event code which is in turn based on | |
8 | * sparc64 and x86 code. | |
9 | */ | |
10 | ||
11 | #define pr_fmt(fmt) "riscv-pmu-sbi: " fmt | |
12 | ||
13 | #include <linux/mod_devicetable.h> | |
14 | #include <linux/perf/riscv_pmu.h> | |
15 | #include <linux/platform_device.h> | |
4905ec2f AP |
16 | #include <linux/irq.h> |
17 | #include <linux/irqdomain.h> | |
18 | #include <linux/of_irq.h> | |
19 | #include <linux/of.h> | |
e9a023f2 | 20 | #include <linux/cpu_pm.h> |
096b52fd | 21 | #include <linux/sched/clock.h> |
e9991434 | 22 | |
65e9fb08 | 23 | #include <asm/errata_list.h> |
e9991434 | 24 | #include <asm/sbi.h> |
4905ec2f | 25 | #include <asm/hwcap.h> |
e9991434 | 26 | |
26fabd6d NS |
/* sysfs "format" attributes: how perf userspace encodes an event for this PMU */
PMU_FORMAT_ATTR(event, "config:0-47");	/* SBI event data lives in config[47:0] */
PMU_FORMAT_ATTR(firmware, "config:63");	/* MSB selects a firmware (SBI) event */

static struct attribute *riscv_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_firmware.attr,
	NULL,
};

static struct attribute_group riscv_pmu_format_group = {
	.name = "format",
	.attrs = riscv_arch_formats_attr,
};

/* All attribute groups exposed by this driver (wired up in probe) */
static const struct attribute_group *riscv_pmu_attr_groups[] = {
	&riscv_pmu_format_group,
	NULL,
};
45 | ||
/*
 * RISC-V doesn't have heterogeneous harts yet. This need to be part of
 * per_cpu in case of harts with different pmu counters
 */
static union sbi_pmu_ctr_info *pmu_ctr_list;	/* indexed by logical counter id */
static bool riscv_pmu_use_irq;			/* an overflow interrupt is available */
static unsigned int riscv_pmu_irq_num;		/* local interrupt cause number */
static unsigned int riscv_pmu_irq;		/* Linux irq mapped from the above */

/* Cache the available counters in a bitmask */
static unsigned long cmask;
57 | ||
e9991434 AP |
/*
 * One SBI PMU event index, viewable either as a generic hardware event,
 * a hardware cache event, or the raw 32-bit index (see the SBI PMU spec
 * for the field layout).
 */
struct sbi_pmu_event_data {
	union {
		union {
			struct hw_gen_event {
				uint32_t event_code:16;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_gen_event;
			struct hw_cache_event {
				uint32_t result_id:1;
				uint32_t op_id:2;
				uint32_t cache_id:13;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_cache_event;
		};
		uint32_t event_idx;
	};
};
77 | ||
/*
 * Map of generic perf hardware events to SBI PMU event indexes.
 * Each entry encodes {event_code, SBI_PMU_EVENT_TYPE_HW, 0}.
 */
static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {.hw_gen_event = {
							SBI_PMU_HW_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_INSTRUCTIONS] = {.hw_gen_event = {
							SBI_PMU_HW_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {.hw_gen_event = {
							SBI_PMU_HW_CACHE_REFERENCES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_MISSES] = {.hw_gen_event = {
							SBI_PMU_HW_CACHE_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_MISSES] = {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BUS_CYCLES] = {.hw_gen_event = {
							SBI_PMU_HW_BUS_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_FRONTEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_BACKEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {.hw_gen_event = {
							SBI_PMU_HW_REF_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
};
110 | ||
111 | #define C(x) PERF_COUNT_HW_CACHE_##x | |
112 | static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX] | |
113 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
114 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
115 | [C(L1D)] = { | |
116 | [C(OP_READ)] = { | |
117 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
118 | C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
119 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
120 | C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
121 | }, | |
122 | [C(OP_WRITE)] = { | |
123 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
124 | C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
125 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
126 | C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
127 | }, | |
128 | [C(OP_PREFETCH)] = { | |
129 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
130 | C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
131 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
132 | C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
133 | }, | |
134 | }, | |
135 | [C(L1I)] = { | |
136 | [C(OP_READ)] = { | |
137 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
138 | C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
139 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), C(OP_READ), | |
140 | C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
141 | }, | |
142 | [C(OP_WRITE)] = { | |
143 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
144 | C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
145 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
146 | C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
147 | }, | |
148 | [C(OP_PREFETCH)] = { | |
149 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
150 | C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
151 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
152 | C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
153 | }, | |
154 | }, | |
155 | [C(LL)] = { | |
156 | [C(OP_READ)] = { | |
157 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
158 | C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
159 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
160 | C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
161 | }, | |
162 | [C(OP_WRITE)] = { | |
163 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
164 | C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
165 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
166 | C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
167 | }, | |
168 | [C(OP_PREFETCH)] = { | |
169 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
170 | C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
171 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
172 | C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
173 | }, | |
174 | }, | |
175 | [C(DTLB)] = { | |
176 | [C(OP_READ)] = { | |
177 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
178 | C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
179 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
180 | C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
181 | }, | |
182 | [C(OP_WRITE)] = { | |
183 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
184 | C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
185 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
186 | C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
187 | }, | |
188 | [C(OP_PREFETCH)] = { | |
189 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
190 | C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
191 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
192 | C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
193 | }, | |
194 | }, | |
195 | [C(ITLB)] = { | |
196 | [C(OP_READ)] = { | |
197 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
198 | C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
199 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
200 | C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
201 | }, | |
202 | [C(OP_WRITE)] = { | |
203 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
204 | C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
205 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
206 | C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
207 | }, | |
208 | [C(OP_PREFETCH)] = { | |
209 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
210 | C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
211 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
212 | C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
213 | }, | |
214 | }, | |
215 | [C(BPU)] = { | |
216 | [C(OP_READ)] = { | |
217 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
218 | C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
219 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
220 | C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
221 | }, | |
222 | [C(OP_WRITE)] = { | |
223 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
224 | C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
225 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
226 | C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
227 | }, | |
228 | [C(OP_PREFETCH)] = { | |
229 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
230 | C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
231 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
232 | C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
233 | }, | |
234 | }, | |
235 | [C(NODE)] = { | |
236 | [C(OP_READ)] = { | |
237 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
238 | C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
239 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
240 | C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
241 | }, | |
242 | [C(OP_WRITE)] = { | |
243 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
244 | C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
245 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
246 | C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
247 | }, | |
248 | [C(OP_PREFETCH)] = { | |
249 | [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), | |
250 | C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
251 | [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), | |
252 | C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, | |
253 | }, | |
254 | }, | |
255 | }; | |
256 | ||
/* Width (as reported by the SBI firmware) of counter @idx */
static int pmu_sbi_ctr_get_width(int idx)
{
	return pmu_ctr_list[idx].width;
}
261 | ||
262 | static bool pmu_sbi_ctr_is_fw(int cidx) | |
263 | { | |
264 | union sbi_pmu_ctr_info *info; | |
265 | ||
266 | info = &pmu_ctr_list[cidx]; | |
267 | if (!info) | |
268 | return false; | |
269 | ||
270 | return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false; | |
271 | } | |
272 | ||
585e351f AP |
273 | /* |
274 | * Returns the counter width of a programmable counter and number of hardware | |
275 | * counters. As we don't support heterogeneous CPUs yet, it is okay to just | |
276 | * return the counter width of the first programmable counter. | |
277 | */ | |
278 | int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr) | |
279 | { | |
280 | int i; | |
281 | union sbi_pmu_ctr_info *info; | |
282 | u32 hpm_width = 0, hpm_count = 0; | |
283 | ||
284 | if (!cmask) | |
285 | return -EINVAL; | |
286 | ||
287 | for_each_set_bit(i, &cmask, RISCV_MAX_COUNTERS) { | |
288 | info = &pmu_ctr_list[i]; | |
289 | if (!info) | |
290 | continue; | |
291 | if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET) | |
292 | hpm_width = info->width; | |
293 | if (info->type == SBI_PMU_CTR_TYPE_HW) | |
294 | hpm_count++; | |
295 | } | |
296 | ||
297 | *hw_ctr_width = hpm_width; | |
298 | *num_hw_ctr = hpm_count; | |
299 | ||
300 | return 0; | |
301 | } | |
302 | EXPORT_SYMBOL_GPL(riscv_pmu_get_hpm_info); | |
303 | ||
8929283a AP |
304 | static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event) |
305 | { | |
306 | unsigned long cflags = 0; | |
307 | bool guest_events = false; | |
308 | ||
309 | if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS) | |
310 | guest_events = true; | |
311 | if (event->attr.exclude_kernel) | |
312 | cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VSINH : SBI_PMU_CFG_FLAG_SET_SINH; | |
313 | if (event->attr.exclude_user) | |
314 | cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VUINH : SBI_PMU_CFG_FLAG_SET_UINH; | |
315 | if (guest_events && event->attr.exclude_hv) | |
316 | cflags |= SBI_PMU_CFG_FLAG_SET_SINH; | |
317 | if (event->attr.exclude_host) | |
318 | cflags |= SBI_PMU_CFG_FLAG_SET_UINH | SBI_PMU_CFG_FLAG_SET_SINH; | |
319 | if (event->attr.exclude_guest) | |
320 | cflags |= SBI_PMU_CFG_FLAG_SET_VSINH | SBI_PMU_CFG_FLAG_SET_VUINH; | |
321 | ||
322 | return cflags; | |
323 | } | |
324 | ||
e9991434 AP |
/*
 * Ask the SBI firmware for a counter that can monitor @event, then mark it
 * used in the per-cpu bookkeeping. Returns the counter index, a mapped SBI
 * error, or -ENOENT when the returned index is unusable or already taken.
 */
static int pmu_sbi_ctr_get_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct sbiret ret;
	int idx;
	uint64_t cbase = 0;
	unsigned long cflags = 0;

	cflags = pmu_sbi_get_filter_flags(event);
	/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
	/* On RV32 the 64-bit event data is split across two ecall arguments */
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			rvpmu->cmask, cflags, hwc->event_base, hwc->config,
			hwc->config >> 32);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0);
#endif
	if (ret.error) {
		pr_debug("Not able to find a counter for event %lx config %llx\n",
			hwc->event_base, hwc->config);
		return sbi_err_map_linux_errno(ret.error);
	}

	idx = ret.value;
	/* Reject indexes outside the advertised mask or without cached info */
	if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
		return -ENOENT;

	/* Additional sanity check for the counter id */
	if (pmu_sbi_ctr_is_fw(idx)) {
		if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))
			return idx;
	} else {
		if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))
			return idx;
	}

	return -ENOENT;
}
366 | ||
367 | static void pmu_sbi_ctr_clear_idx(struct perf_event *event) | |
368 | { | |
369 | ||
370 | struct hw_perf_event *hwc = &event->hw; | |
371 | struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); | |
372 | struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); | |
373 | int idx = hwc->idx; | |
374 | ||
375 | if (pmu_sbi_ctr_is_fw(idx)) | |
376 | clear_bit(idx, cpuc->used_fw_ctrs); | |
377 | else | |
378 | clear_bit(idx, cpuc->used_hw_ctrs); | |
379 | } | |
380 | ||
381 | static int pmu_event_find_cache(u64 config) | |
382 | { | |
383 | unsigned int cache_type, cache_op, cache_result, ret; | |
384 | ||
385 | cache_type = (config >> 0) & 0xff; | |
386 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | |
387 | return -EINVAL; | |
388 | ||
389 | cache_op = (config >> 8) & 0xff; | |
390 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | |
391 | return -EINVAL; | |
392 | ||
393 | cache_result = (config >> 16) & 0xff; | |
394 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | |
395 | return -EINVAL; | |
396 | ||
397 | ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx; | |
398 | ||
399 | return ret; | |
400 | } | |
401 | ||
402 | static bool pmu_sbi_is_fw_event(struct perf_event *event) | |
403 | { | |
404 | u32 type = event->attr.type; | |
405 | u64 config = event->attr.config; | |
406 | ||
407 | if ((type == PERF_TYPE_RAW) && ((config >> 63) == 1)) | |
408 | return true; | |
409 | else | |
410 | return false; | |
411 | } | |
412 | ||
413 | static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig) | |
414 | { | |
415 | u32 type = event->attr.type; | |
416 | u64 config = event->attr.config; | |
417 | int bSoftware; | |
418 | u64 raw_config_val; | |
419 | int ret; | |
420 | ||
421 | switch (type) { | |
422 | case PERF_TYPE_HARDWARE: | |
423 | if (config >= PERF_COUNT_HW_MAX) | |
424 | return -EINVAL; | |
425 | ret = pmu_hw_event_map[event->attr.config].event_idx; | |
426 | break; | |
427 | case PERF_TYPE_HW_CACHE: | |
428 | ret = pmu_event_find_cache(config); | |
429 | break; | |
430 | case PERF_TYPE_RAW: | |
431 | /* | |
432 | * As per SBI specification, the upper 16 bits must be unused for | |
433 | * a raw event. Use the MSB (63b) to distinguish between hardware | |
434 | * raw event and firmware events. | |
435 | */ | |
436 | bSoftware = config >> 63; | |
437 | raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK; | |
438 | if (bSoftware) { | |
9f828bc3 MC |
439 | ret = (raw_config_val & 0xFFFF) | |
440 | (SBI_PMU_EVENT_TYPE_FW << 16); | |
e9991434 AP |
441 | } else { |
442 | ret = RISCV_PMU_RAW_EVENT_IDX; | |
443 | *econfig = raw_config_val; | |
444 | } | |
445 | break; | |
446 | default: | |
447 | ret = -EINVAL; | |
448 | break; | |
449 | } | |
450 | ||
451 | return ret; | |
452 | } | |
453 | ||
454 | static u64 pmu_sbi_ctr_read(struct perf_event *event) | |
455 | { | |
456 | struct hw_perf_event *hwc = &event->hw; | |
457 | int idx = hwc->idx; | |
458 | struct sbiret ret; | |
459 | union sbi_pmu_ctr_info info; | |
460 | u64 val = 0; | |
461 | ||
462 | if (pmu_sbi_is_fw_event(event)) { | |
463 | ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ, | |
464 | hwc->idx, 0, 0, 0, 0, 0); | |
465 | if (!ret.error) | |
466 | val = ret.value; | |
467 | } else { | |
468 | info = pmu_ctr_list[idx]; | |
469 | val = riscv_pmu_ctr_read_csr(info.csr); | |
470 | if (IS_ENABLED(CONFIG_32BIT)) | |
471 | val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 31 | val; | |
472 | } | |
473 | ||
474 | return val; | |
475 | } | |
476 | ||
/*
 * Start the counter backing @event with initial value @ival. On RV32 the
 * 64-bit initial value is passed to the SBI call as two 32-bit halves.
 */
static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;

#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, ival >> 32, 0);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, 0, 0);
#endif
	/* "already started" is benign; everything else is worth a log line */
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
		pr_err("Starting counter idx %d failed with error %d\n",
			hwc->idx, sbi_err_map_linux_errno(ret.error));
}
494 | ||
495 | static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) | |
496 | { | |
497 | struct sbiret ret; | |
498 | struct hw_perf_event *hwc = &event->hw; | |
499 | ||
500 | ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0); | |
501 | if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) && | |
502 | flag != SBI_PMU_STOP_FLAG_RESET) | |
503 | pr_err("Stopping counter idx %d failed with error %d\n", | |
504 | hwc->idx, sbi_err_map_linux_errno(ret.error)); | |
505 | } | |
506 | ||
507 | static int pmu_sbi_find_num_ctrs(void) | |
508 | { | |
509 | struct sbiret ret; | |
510 | ||
511 | ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0); | |
512 | if (!ret.error) | |
513 | return ret.value; | |
514 | else | |
515 | return sbi_err_map_linux_errno(ret.error); | |
516 | } | |
517 | ||
1537bf26 | 518 | static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask) |
e9991434 AP |
519 | { |
520 | struct sbiret ret; | |
521 | int i, num_hw_ctr = 0, num_fw_ctr = 0; | |
522 | union sbi_pmu_ctr_info cinfo; | |
523 | ||
524 | pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL); | |
525 | if (!pmu_ctr_list) | |
526 | return -ENOMEM; | |
527 | ||
20e0fbab | 528 | for (i = 0; i < nctr; i++) { |
e9991434 AP |
529 | ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0); |
530 | if (ret.error) | |
531 | /* The logical counter ids are not expected to be contiguous */ | |
532 | continue; | |
1537bf26 SM |
533 | |
534 | *mask |= BIT(i); | |
535 | ||
e9991434 AP |
536 | cinfo.value = ret.value; |
537 | if (cinfo.type == SBI_PMU_CTR_TYPE_FW) | |
538 | num_fw_ctr++; | |
539 | else | |
540 | num_hw_ctr++; | |
541 | pmu_ctr_list[i].value = cinfo.value; | |
542 | } | |
543 | ||
544 | pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr); | |
545 | ||
546 | return 0; | |
547 | } | |
548 | ||
4905ec2f AP |
/* Stop every counter advertised in the pmu's counter mask */
static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
{
	/*
	 * No need to check the error because we are disabling all the counters
	 * which may include counters that are not enabled yet.
	 */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
		  0, pmu->cmask, 0, 0, 0, 0);
}
558 | ||
/* Stop only the hardware counters currently in use on this cpu */
static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
{
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/* No need to check the error here as we can't do anything about the error */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0,
		  cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
}
567 | ||
/*
 * This function starts all the used counters in two step approach.
 * Any counter that did not overflow can be start in a single step
 * while the overflowed counters need to be started with updated initialization
 * value.
 */
static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
					       unsigned long ctr_ovf_mask)
{
	int idx = 0;
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct perf_event *event;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
	unsigned long ctr_start_mask = 0;
	uint64_t max_period;
	struct hw_perf_event *hwc;
	u64 init_val = 0;

	/* Counters that are in use on this cpu but did not overflow */
	ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask;

	/* Start all the counters that did not overflow in a single shot */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
		  0, 0, 0, 0);

	/* Reinitialize and start all the counter that overflowed */
	while (ctr_ovf_mask) {
		if (ctr_ovf_mask & 0x01) {
			event = cpu_hw_evt->events[idx];
			hwc = &event->hw;
			max_period = riscv_pmu_ctr_get_width_mask(event);
			/* Reload from the period saved by riscv_pmu_event_set_period() */
			init_val = local64_read(&hwc->prev_count) & max_period;
#if defined(CONFIG_32BIT)
			/* RV32: the 64-bit initial value is split into two args */
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, init_val >> 32, 0);
#else
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, 0, 0);
#endif
			perf_event_update_userpage(event);
		}
		ctr_ovf_mask = ctr_ovf_mask >> 1;
		idx++;
	}
}
612 | ||
613 | static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) | |
614 | { | |
615 | struct perf_sample_data data; | |
616 | struct pt_regs *regs; | |
617 | struct hw_perf_event *hw_evt; | |
618 | union sbi_pmu_ctr_info *info; | |
619 | int lidx, hidx, fidx; | |
620 | struct riscv_pmu *pmu; | |
621 | struct perf_event *event; | |
622 | unsigned long overflow; | |
623 | unsigned long overflowed_ctrs = 0; | |
624 | struct cpu_hw_events *cpu_hw_evt = dev; | |
096b52fd | 625 | u64 start_clock = sched_clock(); |
4905ec2f AP |
626 | |
627 | if (WARN_ON_ONCE(!cpu_hw_evt)) | |
628 | return IRQ_NONE; | |
629 | ||
630 | /* Firmware counter don't support overflow yet */ | |
631 | fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS); | |
632 | event = cpu_hw_evt->events[fidx]; | |
633 | if (!event) { | |
65e9fb08 | 634 | csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); |
4905ec2f AP |
635 | return IRQ_NONE; |
636 | } | |
637 | ||
638 | pmu = to_riscv_pmu(event->pmu); | |
639 | pmu_sbi_stop_hw_ctrs(pmu); | |
640 | ||
641 | /* Overflow status register should only be read after counter are stopped */ | |
65e9fb08 | 642 | ALT_SBI_PMU_OVERFLOW(overflow); |
4905ec2f | 643 | |
c7a9dcea | 644 | /* |
4905ec2f AP |
645 | * Overflow interrupt pending bit should only be cleared after stopping |
646 | * all the counters to avoid any race condition. | |
647 | */ | |
65e9fb08 | 648 | csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); |
4905ec2f AP |
649 | |
650 | /* No overflow bit is set */ | |
651 | if (!overflow) | |
652 | return IRQ_NONE; | |
653 | ||
654 | regs = get_irq_regs(); | |
655 | ||
656 | for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) { | |
657 | struct perf_event *event = cpu_hw_evt->events[lidx]; | |
658 | ||
659 | /* Skip if invalid event or user did not request a sampling */ | |
660 | if (!event || !is_sampling_event(event)) | |
661 | continue; | |
662 | ||
663 | info = &pmu_ctr_list[lidx]; | |
664 | /* Do a sanity check */ | |
665 | if (!info || info->type != SBI_PMU_CTR_TYPE_HW) | |
666 | continue; | |
667 | ||
668 | /* compute hardware counter index */ | |
669 | hidx = info->csr - CSR_CYCLE; | |
670 | /* check if the corresponding bit is set in sscountovf */ | |
671 | if (!(overflow & (1 << hidx))) | |
672 | continue; | |
673 | ||
674 | /* | |
675 | * Keep a track of overflowed counters so that they can be started | |
676 | * with updated initial value. | |
677 | */ | |
678 | overflowed_ctrs |= 1 << lidx; | |
679 | hw_evt = &event->hw; | |
680 | riscv_pmu_event_update(event); | |
681 | perf_sample_data_init(&data, 0, hw_evt->last_period); | |
682 | if (riscv_pmu_event_set_period(event)) { | |
683 | /* | |
684 | * Unlike other ISAs, RISC-V don't have to disable interrupts | |
685 | * to avoid throttling here. As per the specification, the | |
686 | * interrupt remains disabled until the OF bit is set. | |
687 | * Interrupts are enabled again only during the start. | |
688 | * TODO: We will need to stop the guest counters once | |
689 | * virtualization support is added. | |
690 | */ | |
691 | perf_event_overflow(event, &data, regs); | |
692 | } | |
693 | } | |
096b52fd | 694 | |
4905ec2f | 695 | pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs); |
096b52fd | 696 | perf_sample_event_took(sched_clock() - start_clock); |
4905ec2f AP |
697 | |
698 | return IRQ_HANDLED; | |
699 | } | |
700 | ||
e9991434 AP |
/* CPU hotplug "starting" callback, runs on the incoming cpu */
static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/*
	 * Enable the access for CYCLE, TIME, and INSTRET CSRs from userspace,
	 * as is necessary to maintain uABI compatibility.
	 */
	csr_write(CSR_SCOUNTEREN, 0x7);

	/* Stop all the counters so that they can be enabled from perf */
	pmu_sbi_stop_all(pmu);

	if (riscv_pmu_use_irq) {
		cpu_hw_evt->irq = riscv_pmu_irq;
		/* Clear any stale pending bit before unmasking the interrupt */
		csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
		csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
	}

	return 0;
}
724 | ||
/* CPU hotplug "dying" callback: mask the PMU interrupt, revoke user access */
static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
	if (riscv_pmu_use_irq) {
		disable_percpu_irq(riscv_pmu_irq);
		csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
	}

	/* Disable all counters access for user mode now */
	csr_write(CSR_SCOUNTEREN, 0x0);

	return 0;
}
737 | ||
4905ec2f AP |
/*
 * Discover and request the per-cpu PMU overflow interrupt: the standard
 * Sscofpmf interrupt when available, otherwise the T-Head C9xx errata
 * interrupt. Returns -EOPNOTSUPP when neither is present.
 */
static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
{
	int ret;
	struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
	struct device_node *cpu, *child;
	struct irq_domain *domain = NULL;

	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
		riscv_pmu_irq_num = RV_IRQ_PMU;
		riscv_pmu_use_irq = true;
	} else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
		   riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
		   riscv_cached_marchid(0) == 0 &&
		   riscv_cached_mimpid(0) == 0) {
		/* T-Head C9xx cores use a custom (non-standard) PMU interrupt */
		riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
		riscv_pmu_use_irq = true;
	}

	if (!riscv_pmu_use_irq)
		return -EOPNOTSUPP;

	/* Locate the INTC irq domain through any cpu's interrupt controller node */
	for_each_of_cpu_node(cpu) {
		child = of_get_compatible_child(cpu, "riscv,cpu-intc");
		if (!child) {
			pr_err("Failed to find INTC node\n");
			of_node_put(cpu);	/* balance the iterator's reference */
			return -ENODEV;
		}
		domain = irq_find_host(child);
		of_node_put(child);
		if (domain) {
			of_node_put(cpu);	/* drop the ref held for the loop break */
			break;
		}
	}
	if (!domain) {
		pr_err("Failed to find INTC IRQ root domain\n");
		return -ENODEV;
	}

	riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num);
	if (!riscv_pmu_irq) {
		pr_err("Failed to map PMU interrupt for node\n");
		return -ENODEV;
	}

	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
	if (ret) {
		pr_err("registering percpu irq failed [%d]\n", ret);
		return ret;
	}

	return 0;
}
792 | ||
e9a023f2 EL |
#ifdef CONFIG_CPU_PM
/*
 * CPU PM notifier: counters may lose state across a low-power entry, so
 * stop (and save) every active event on CPU_PM_ENTER and restart (reload)
 * them once the CPU is back (CPU_PM_EXIT / CPU_PM_ENTER_FAILED).
 */
static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			       void *v)
{
	struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int i;

	/* Nothing to do when no counter is in use on this CPU. */
	if (!bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS))
		return NOTIFY_OK;

	for (i = 0; i < RISCV_MAX_COUNTERS; i++) {
		struct perf_event *event = cpuc->events[i];

		if (!event)
			continue;

		if (cmd == CPU_PM_ENTER) {
			/* Stop and update the counter */
			riscv_pmu_stop(event, PERF_EF_UPDATE);
		} else if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) {
			/* Restore and enable the counter */
			riscv_pmu_start(event, PERF_EF_RELOAD);
		}
	}

	return NOTIFY_OK;
}

/* Hook the PMU into the CPU PM notifier chain. */
static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
{
	pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
	return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
}

static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
{
	cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
}
#else
/* !CONFIG_CPU_PM: no low-power transitions to care about. */
static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
#endif
847 | ||
/*
 * Undo pmu_sbi_device_probe()'s registrations: drop the CPU PM notifier
 * (no-op stub when !CONFIG_CPU_PM) and remove the CPU hotplug instance.
 * Used on the probe error paths; does NOT free @pmu itself.
 */
static void riscv_pmu_destroy(struct riscv_pmu *pmu)
{
	riscv_pm_pmu_unregister(pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
}
853 | ||
e9991434 AP |
854 | static int pmu_sbi_device_probe(struct platform_device *pdev) |
855 | { | |
856 | struct riscv_pmu *pmu = NULL; | |
4905ec2f | 857 | int ret = -ENODEV; |
1537bf26 | 858 | int num_counters; |
e9991434 AP |
859 | |
860 | pr_info("SBI PMU extension is available\n"); | |
861 | pmu = riscv_pmu_alloc(); | |
862 | if (!pmu) | |
863 | return -ENOMEM; | |
864 | ||
865 | num_counters = pmu_sbi_find_num_ctrs(); | |
866 | if (num_counters < 0) { | |
867 | pr_err("SBI PMU extension doesn't provide any counters\n"); | |
4905ec2f | 868 | goto out_free; |
e9991434 AP |
869 | } |
870 | ||
871 | /* cache all the information about counters now */ | |
1537bf26 | 872 | if (pmu_sbi_get_ctrinfo(num_counters, &cmask)) |
4905ec2f | 873 | goto out_free; |
e9991434 | 874 | |
4905ec2f AP |
875 | ret = pmu_sbi_setup_irqs(pmu, pdev); |
876 | if (ret < 0) { | |
877 | pr_info("Perf sampling/filtering is not supported as sscof extension is not available\n"); | |
878 | pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; | |
879 | pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; | |
880 | } | |
1537bf26 | 881 | |
26fabd6d | 882 | pmu->pmu.attr_groups = riscv_pmu_attr_groups; |
1537bf26 | 883 | pmu->cmask = cmask; |
e9991434 AP |
884 | pmu->ctr_start = pmu_sbi_ctr_start; |
885 | pmu->ctr_stop = pmu_sbi_ctr_stop; | |
886 | pmu->event_map = pmu_sbi_event_map; | |
887 | pmu->ctr_get_idx = pmu_sbi_ctr_get_idx; | |
888 | pmu->ctr_get_width = pmu_sbi_ctr_get_width; | |
889 | pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx; | |
890 | pmu->ctr_read = pmu_sbi_ctr_read; | |
891 | ||
892 | ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); | |
893 | if (ret) | |
894 | return ret; | |
895 | ||
e9a023f2 EL |
896 | ret = riscv_pm_pmu_register(pmu); |
897 | if (ret) | |
898 | goto out_unregister; | |
899 | ||
e9991434 | 900 | ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW); |
e9a023f2 EL |
901 | if (ret) |
902 | goto out_unregister; | |
e9991434 AP |
903 | |
904 | return 0; | |
4905ec2f | 905 | |
e9a023f2 EL |
906 | out_unregister: |
907 | riscv_pmu_destroy(pmu); | |
908 | ||
4905ec2f AP |
909 | out_free: |
910 | kfree(pmu); | |
911 | return ret; | |
e9991434 AP |
912 | } |
913 | ||
/*
 * Platform driver bound to the RISCV_PMU_PDEV_NAME device that
 * pmu_sbi_devinit() creates below; probe does the real PMU setup.
 */
static struct platform_driver pmu_sbi_driver = {
	.probe		= pmu_sbi_device_probe,
	.driver		= {
		.name = RISCV_PMU_PDEV_NAME,
	},
};
920 | ||
921 | static int __init pmu_sbi_devinit(void) | |
922 | { | |
923 | int ret; | |
924 | struct platform_device *pdev; | |
925 | ||
926 | if (sbi_spec_version < sbi_mk_version(0, 3) || | |
41cad828 | 927 | !sbi_probe_extension(SBI_EXT_PMU)) { |
e9991434 AP |
928 | return 0; |
929 | } | |
930 | ||
931 | ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING, | |
932 | "perf/riscv/pmu:starting", | |
933 | pmu_sbi_starting_cpu, pmu_sbi_dying_cpu); | |
934 | if (ret) { | |
935 | pr_err("CPU hotplug notifier could not be registered: %d\n", | |
936 | ret); | |
937 | return ret; | |
938 | } | |
939 | ||
940 | ret = platform_driver_register(&pmu_sbi_driver); | |
941 | if (ret) | |
942 | return ret; | |
943 | ||
944 | pdev = platform_device_register_simple(RISCV_PMU_PDEV_NAME, -1, NULL, 0); | |
945 | if (IS_ERR(pdev)) { | |
946 | platform_driver_unregister(&pmu_sbi_driver); | |
947 | return PTR_ERR(pdev); | |
948 | } | |
949 | ||
950 | /* Notify legacy implementation that SBI pmu is available*/ | |
951 | riscv_pmu_legacy_skip_init(); | |
952 | ||
953 | return ret; | |
954 | } | |
955 | device_initcall(pmu_sbi_devinit) |