Commit | Line | Data |
---|---|---|
8404b0fb QL |
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* | |
3 | * This driver adds support for PCIe PMU RCiEP device. Related | |
4 | * perf events are bandwidth, latency etc. | |
5 | * | |
6 | * Copyright (C) 2021 HiSilicon Limited | |
7 | * Author: Qi Liu <liuqi115@huawei.com> | |
8 | */ | |
9 | #include <linux/bitfield.h> | |
10 | #include <linux/bitmap.h> | |
11 | #include <linux/bug.h> | |
12 | #include <linux/device.h> | |
13 | #include <linux/err.h> | |
14 | #include <linux/interrupt.h> | |
15 | #include <linux/irq.h> | |
16 | #include <linux/kernel.h> | |
17 | #include <linux/list.h> | |
18 | #include <linux/module.h> | |
19 | #include <linux/pci.h> | |
20 | #include <linux/perf_event.h> | |
21 | ||
#define DRV_NAME "hisi_pcie_pmu"

/* Define registers (offsets into the BAR-2 register window) */
#define HISI_PCIE_GLOBAL_CTRL		0x00
#define HISI_PCIE_EVENT_CTRL		0x010
#define HISI_PCIE_CNT			0x090
#define HISI_PCIE_EXT_CNT		0x110
#define HISI_PCIE_INT_STAT		0x150
#define HISI_PCIE_INT_MASK		0x154
#define HISI_PCIE_REG_BDF		0xfe0
#define HISI_PCIE_REG_VERSION		0xfe4
#define HISI_PCIE_REG_INFO		0xfe8

/* Define command in HISI_PCIE_GLOBAL_CTRL */
#define HISI_PCIE_GLOBAL_EN		0x01
#define HISI_PCIE_GLOBAL_NONE		0

/* Define command in HISI_PCIE_EVENT_CTRL */
#define HISI_PCIE_EVENT_EN		BIT_ULL(20)
#define HISI_PCIE_RESET_CNT		BIT_ULL(22)
#define HISI_PCIE_INIT_SET		BIT_ULL(34)
#define HISI_PCIE_THR_EN		BIT_ULL(26)
#define HISI_PCIE_TARGET_EN		BIT_ULL(32)
#define HISI_PCIE_TRIG_EN		BIT_ULL(52)

/* Define offsets in HISI_PCIE_EVENT_CTRL */
#define HISI_PCIE_EVENT_M		GENMASK_ULL(15, 0)
#define HISI_PCIE_THR_MODE_M		GENMASK_ULL(27, 27)
#define HISI_PCIE_THR_M			GENMASK_ULL(31, 28)
#define HISI_PCIE_LEN_M			GENMASK_ULL(35, 34)
#define HISI_PCIE_TARGET_M		GENMASK_ULL(52, 36)
#define HISI_PCIE_TRIG_MODE_M		GENMASK_ULL(53, 53)
#define HISI_PCIE_TRIG_M		GENMASK_ULL(59, 56)

/* Default config of TLP length mode, will count both TLP headers and payloads */
#define HISI_PCIE_LEN_M_DEFAULT		3ULL

#define HISI_PCIE_MAX_COUNTERS		8
#define HISI_PCIE_REG_STEP		8	/* per-counter register stride in bytes */
#define HISI_PCIE_THR_MAX_VAL		10
#define HISI_PCIE_TRIG_MAX_VAL		10
#define HISI_PCIE_MAX_PERIOD		(GENMASK_ULL(63, 0))
#define HISI_PCIE_INIT_VAL		BIT_ULL(63)
64 | ||
/*
 * struct hisi_pcie_pmu - per-device context for one PCIe PMU RCiEP.
 * @hw_events:  active events indexed by hardware counter number
 * @node:       cpuhp multi-instance linkage
 * @pdev:       the RCiEP PCI device carrying the PMU
 * @pmu:        perf core registration
 * @base:       ioremapped register window (BAR 2)
 * @irq:        MSI vector used for counter overflow
 * @identifier: value of HISI_PCIE_REG_VERSION
 * @on_cpu:     CPU used for event counting, -1 if none chosen yet
 */
struct hisi_pcie_pmu {
	struct perf_event *hw_events[HISI_PCIE_MAX_COUNTERS];
	struct hlist_node node;
	struct pci_dev *pdev;
	struct pmu pmu;
	void __iomem *base;
	int irq;
	u32 identifier;
	/* Minimum and maximum BDF of root ports monitored by PMU */
	u16 bdf_min;
	u16 bdf_max;
	int on_cpu;
};
78 | ||
/* Low/high 16-bit halves of one 32-bit register value. */
struct hisi_pcie_reg_pair {
	u16 lo;
	u16 hi;
};

#define to_pcie_pmu(p)  (container_of((p), struct hisi_pcie_pmu, pmu))
/* Device/function byte of a Bus:Device.Function triple */
#define GET_PCI_DEVFN(bdf)  ((bdf) & 0xff)
86 | ||
/*
 * Generate a hisi_pcie_get_<name>() accessor that extracts bits [_hi:_lo]
 * of the given perf_event_attr config word (config/config1/config2).
 */
#define HISI_PCIE_PMU_FILTER_ATTR(_name, _config, _hi, _lo)		  \
	static u64 hisi_pcie_get_##_name(struct perf_event *event)	  \
	{								  \
		return FIELD_GET(GENMASK(_hi, _lo), event->attr._config); \
	}								  \

HISI_PCIE_PMU_FILTER_ATTR(event, config, 16, 0);
HISI_PCIE_PMU_FILTER_ATTR(thr_len, config1, 3, 0);
HISI_PCIE_PMU_FILTER_ATTR(thr_mode, config1, 4, 4);
HISI_PCIE_PMU_FILTER_ATTR(trig_len, config1, 8, 5);
HISI_PCIE_PMU_FILTER_ATTR(trig_mode, config1, 9, 9);
HISI_PCIE_PMU_FILTER_ATTR(len_mode, config1, 11, 10);
HISI_PCIE_PMU_FILTER_ATTR(port, config2, 15, 0);
HISI_PCIE_PMU_FILTER_ATTR(bdf, config2, 31, 16);
101 | ||
102 | static ssize_t hisi_pcie_format_sysfs_show(struct device *dev, struct device_attribute *attr, | |
103 | char *buf) | |
104 | { | |
105 | struct dev_ext_attribute *eattr; | |
106 | ||
107 | eattr = container_of(attr, struct dev_ext_attribute, attr); | |
108 | ||
109 | return sysfs_emit(buf, "%s\n", (char *)eattr->var); | |
110 | } | |
111 | ||
112 | static ssize_t hisi_pcie_event_sysfs_show(struct device *dev, struct device_attribute *attr, | |
113 | char *buf) | |
114 | { | |
115 | struct perf_pmu_events_attr *pmu_attr = | |
116 | container_of(attr, struct perf_pmu_events_attr, attr); | |
117 | ||
118 | return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id); | |
119 | } | |
120 | ||
/* Build an anonymous dev_ext_attribute carrying a format description string. */
#define HISI_PCIE_PMU_FORMAT_ATTR(_name, _format)                              \
	(&((struct dev_ext_attribute[]){                                       \
		{ .attr = __ATTR(_name, 0444, hisi_pcie_format_sysfs_show,     \
				 NULL),                                        \
		  .var = (void *)_format }                                     \
	})[0].attr.attr)

/* Build a named event attribute with its config encoding. */
#define HISI_PCIE_PMU_EVENT_ATTR(_name, _id)			\
	PMU_EVENT_ATTR_ID(_name, hisi_pcie_event_sysfs_show, _id)
130 | ||
131 | static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) | |
132 | { | |
133 | struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev)); | |
134 | ||
135 | return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu)); | |
136 | } | |
137 | static DEVICE_ATTR_RO(cpumask); | |
138 | ||
139 | static ssize_t identifier_show(struct device *dev, struct device_attribute *attr, char *buf) | |
140 | { | |
141 | struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev)); | |
142 | ||
143 | return sysfs_emit(buf, "%#x\n", pcie_pmu->identifier); | |
144 | } | |
145 | static DEVICE_ATTR_RO(identifier); | |
146 | ||
147 | static ssize_t bus_show(struct device *dev, struct device_attribute *attr, char *buf) | |
148 | { | |
149 | struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev)); | |
150 | ||
151 | return sysfs_emit(buf, "%#04x\n", PCI_BUS_NUM(pcie_pmu->bdf_min)); | |
152 | } | |
153 | static DEVICE_ATTR_RO(bus); | |
154 | ||
155 | static struct hisi_pcie_reg_pair | |
156 | hisi_pcie_parse_reg_value(struct hisi_pcie_pmu *pcie_pmu, u32 reg_off) | |
157 | { | |
158 | u32 val = readl_relaxed(pcie_pmu->base + reg_off); | |
159 | struct hisi_pcie_reg_pair regs = { | |
160 | .lo = val, | |
161 | .hi = val >> 16, | |
162 | }; | |
163 | ||
164 | return regs; | |
165 | } | |
166 | ||
/*
 * Hardware counter and ext_counter work together for bandwidth, latency, bus
 * utilization and buffer occupancy events. For example, RX memory write latency
 * events(index = 0x0010), counter counts total delay cycles and ext_counter
 * counts RX memory write PCIe packets number.
 *
 * As we don't want PMU driver to process these two data, "delay cycles" can
 * be treated as an independent event(index = 0x0010), "RX memory write packets
 * number" as another(index = 0x10010). BIT 16 is used to distinguish and 0-15
 * bits are "real" event index, which can be used to set HISI_PCIE_EVENT_CTRL.
 */
#define EXT_COUNTER_IS_USED(idx)		((idx) & BIT(16))

/* Strip the software-only ext_counter flag, leaving the hardware event code. */
static u32 hisi_pcie_get_real_event(struct perf_event *event)
{
	return hisi_pcie_get_event(event) & GENMASK(15, 0);
}
184 | ||
185 | static u32 hisi_pcie_pmu_get_offset(u32 offset, u32 idx) | |
186 | { | |
187 | return offset + HISI_PCIE_REG_STEP * idx; | |
188 | } | |
189 | ||
190 | static u32 hisi_pcie_pmu_readl(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, | |
191 | u32 idx) | |
192 | { | |
193 | u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); | |
194 | ||
195 | return readl_relaxed(pcie_pmu->base + offset); | |
196 | } | |
197 | ||
198 | static void hisi_pcie_pmu_writel(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u32 val) | |
199 | { | |
200 | u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); | |
201 | ||
202 | writel_relaxed(val, pcie_pmu->base + offset); | |
203 | } | |
204 | ||
205 | static u64 hisi_pcie_pmu_readq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx) | |
206 | { | |
207 | u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); | |
208 | ||
209 | return readq_relaxed(pcie_pmu->base + offset); | |
210 | } | |
211 | ||
212 | static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u64 val) | |
213 | { | |
214 | u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); | |
215 | ||
216 | writeq_relaxed(val, pcie_pmu->base + offset); | |
217 | } | |
218 | ||
/*
 * Build the HISI_PCIE_EVENT_CTRL value encoding this event's code and all
 * of its filters (port/bdf target, trigger, threshold, TLP length mode).
 * Also used to decide whether two events can share one hardware counter.
 */
static u64 hisi_pcie_pmu_get_event_ctrl_val(struct perf_event *event)
{
	u64 port, trig_len, thr_len, len_mode;
	u64 reg = HISI_PCIE_INIT_SET;

	/* Config HISI_PCIE_EVENT_CTRL according to event. */
	reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event));

	/* Config HISI_PCIE_EVENT_CTRL according to root port or EP device. */
	port = hisi_pcie_get_port(event);
	if (port)
		reg |= FIELD_PREP(HISI_PCIE_TARGET_M, port);
	else
		/* No port mask given: filter by the requester BDF instead. */
		reg |= HISI_PCIE_TARGET_EN |
		       FIELD_PREP(HISI_PCIE_TARGET_M, hisi_pcie_get_bdf(event));

	/* Config HISI_PCIE_EVENT_CTRL according to trigger condition. */
	trig_len = hisi_pcie_get_trig_len(event);
	if (trig_len) {
		reg |= FIELD_PREP(HISI_PCIE_TRIG_M, trig_len);
		reg |= FIELD_PREP(HISI_PCIE_TRIG_MODE_M, hisi_pcie_get_trig_mode(event));
		reg |= HISI_PCIE_TRIG_EN;
	}

	/* Config HISI_PCIE_EVENT_CTRL according to threshold condition. */
	thr_len = hisi_pcie_get_thr_len(event);
	if (thr_len) {
		reg |= FIELD_PREP(HISI_PCIE_THR_M, thr_len);
		reg |= FIELD_PREP(HISI_PCIE_THR_MODE_M, hisi_pcie_get_thr_mode(event));
		reg |= HISI_PCIE_THR_EN;
	}

	/* TLP length mode; default counts both headers and payloads. */
	len_mode = hisi_pcie_get_len_mode(event);
	if (len_mode)
		reg |= FIELD_PREP(HISI_PCIE_LEN_M, len_mode);
	else
		reg |= FIELD_PREP(HISI_PCIE_LEN_M, HISI_PCIE_LEN_M_DEFAULT);

	return reg;
}
259 | ||
260 | static void hisi_pcie_pmu_config_event_ctrl(struct perf_event *event) | |
261 | { | |
262 | struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); | |
263 | struct hw_perf_event *hwc = &event->hw; | |
264 | u64 reg = hisi_pcie_pmu_get_event_ctrl_val(event); | |
265 | ||
8404b0fb QL |
266 | hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg); |
267 | } | |
268 | ||
54a9e47e | 269 | static void hisi_pcie_pmu_clear_event_ctrl(struct perf_event *event) |
8404b0fb QL |
270 | { |
271 | struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); | |
272 | struct hw_perf_event *hwc = &event->hw; | |
273 | ||
274 | hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, HISI_PCIE_INIT_SET); | |
275 | } | |
276 | ||
277 | static bool hisi_pcie_pmu_valid_requester_id(struct hisi_pcie_pmu *pcie_pmu, u32 bdf) | |
278 | { | |
279 | struct pci_dev *root_port, *pdev; | |
280 | u16 rp_bdf; | |
281 | ||
282 | pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pcie_pmu->pdev->bus), PCI_BUS_NUM(bdf), | |
283 | GET_PCI_DEVFN(bdf)); | |
284 | if (!pdev) | |
285 | return false; | |
286 | ||
287 | root_port = pcie_find_root_port(pdev); | |
288 | if (!root_port) { | |
289 | pci_dev_put(pdev); | |
290 | return false; | |
291 | } | |
292 | ||
293 | pci_dev_put(pdev); | |
294 | rp_bdf = pci_dev_id(root_port); | |
295 | return rp_bdf >= pcie_pmu->bdf_min && rp_bdf <= pcie_pmu->bdf_max; | |
296 | } | |
297 | ||
298 | static bool hisi_pcie_pmu_valid_filter(struct perf_event *event, | |
299 | struct hisi_pcie_pmu *pcie_pmu) | |
300 | { | |
301 | u32 requester_id = hisi_pcie_get_bdf(event); | |
302 | ||
303 | if (hisi_pcie_get_thr_len(event) > HISI_PCIE_THR_MAX_VAL) | |
304 | return false; | |
305 | ||
306 | if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL) | |
307 | return false; | |
308 | ||
2f864fee JH |
309 | /* Need to explicitly set filter of "port" or "bdf" */ |
310 | if (!hisi_pcie_get_port(event) && | |
311 | !hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id)) | |
312 | return false; | |
8404b0fb QL |
313 | |
314 | return true; | |
315 | } | |
316 | ||
b6693ad6 YY |
317 | /* |
318 | * Check Whether two events share the same config. The same config means not | |
319 | * only the event code, but also the filter settings of the two events are | |
320 | * the same. | |
321 | */ | |
8404b0fb QL |
322 | static bool hisi_pcie_pmu_cmp_event(struct perf_event *target, |
323 | struct perf_event *event) | |
324 | { | |
b6693ad6 YY |
325 | return hisi_pcie_pmu_get_event_ctrl_val(target) == |
326 | hisi_pcie_pmu_get_event_ctrl_val(event); | |
8404b0fb QL |
327 | } |
328 | ||
/*
 * Check that the whole group this event belongs to can be scheduled at once:
 * events with identical configs share a counter, every distinct config
 * needs its own, and we only have HISI_PCIE_MAX_COUNTERS of them.
 */
static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct perf_event *event_group[HISI_PCIE_MAX_COUNTERS];
	int counters = 1;
	int num;

	event_group[0] = leader;
	if (!is_software_event(leader)) {
		if (leader->pmu != event->pmu)
			return false;

		/* The new event needs its own counter if distinct from the leader. */
		if (leader != event && !hisi_pcie_pmu_cmp_event(leader, event))
			event_group[counters++] = event;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (is_software_event(sibling))
			continue;

		if (sibling->pmu != event->pmu)
			return false;

		for (num = 0; num < counters; num++) {
			/*
			 * If we find a related event, then it's a valid group
			 * since we don't need to allocate a new counter for it.
			 */
			if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
				break;
		}

		/*
		 * Otherwise it's a new event but if there's no available counter,
		 * fail the check since we cannot schedule all the events in
		 * the group simultaneously.
		 */
		if (num == HISI_PCIE_MAX_COUNTERS)
			return false;

		if (num == counters)
			event_group[counters++] = sibling;
	}

	return true;
}
375 | ||
/*
 * perf core .event_init callback: accept or reject an event for this PMU
 * and pick which hardware counter register (base or ext) it will read.
 */
static int hisi_pcie_pmu_event_init(struct perf_event *event)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* Check the type first before going on, otherwise it's not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Bit 16 of the event code selects the ext_counter register. */
	if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
		hwc->event_base = HISI_PCIE_EXT_CNT;
	else
		hwc->event_base = HISI_PCIE_CNT;

	/* Sampling is not supported. */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (!hisi_pcie_pmu_valid_filter(event, pcie_pmu))
		return -EINVAL;

	if (!hisi_pcie_pmu_validate_event_group(event))
		return -EINVAL;

	/* Counting is always done on the PMU's chosen CPU. */
	event->cpu = pcie_pmu->on_cpu;

	return 0;
}
404 | ||
405 | static u64 hisi_pcie_pmu_read_counter(struct perf_event *event) | |
406 | { | |
407 | struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); | |
408 | u32 idx = event->hw.idx; | |
409 | ||
410 | return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx); | |
411 | } | |
412 | ||
/*
 * Check all work events, if a relevant event is found then we return it
 * first, otherwise return the first idle counter (need to reset).
 */
static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu,
				       struct perf_event *event)
{
	int first_idle = -EAGAIN;	/* -EAGAIN if no counter is free */
	struct perf_event *sibling;
	int idx;

	for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
		sibling = pcie_pmu->hw_events[idx];
		if (!sibling) {
			/* Remember the first free slot but keep scanning. */
			if (first_idle == -EAGAIN)
				first_idle = idx;
			continue;
		}

		/* Related events must be used in group */
		if (hisi_pcie_pmu_cmp_event(sibling, event) &&
		    sibling->group_leader == event->group_leader)
			return idx;
	}

	return first_idle;
}
440 | ||
/*
 * Fold the hardware counter delta into event->count. The cmpxchg loop
 * makes the read/update race-free against concurrent updaters.
 */
static void hisi_pcie_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 new_cnt, prev_cnt, delta;

	do {
		prev_cnt = local64_read(&hwc->prev_count);
		new_cnt = hisi_pcie_pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_cnt,
				 new_cnt) != prev_cnt);

	/* Mask to the counter width so wrap-around deltas stay correct. */
	delta = (new_cnt - prev_cnt) & HISI_PCIE_MAX_PERIOD;
	local64_add(delta, &event->count);
}
455 | ||
/* perf core .read callback: just sync the counter into event->count. */
static void hisi_pcie_pmu_read(struct perf_event *event)
{
	hisi_pcie_pmu_event_update(event);
}
460 | ||
461 | static void hisi_pcie_pmu_set_period(struct perf_event *event) | |
462 | { | |
463 | struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); | |
464 | struct hw_perf_event *hwc = &event->hw; | |
465 | int idx = hwc->idx; | |
466 | ||
467 | local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL); | |
468 | hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL); | |
469 | hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL); | |
470 | } | |
471 | ||
472 | static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) | |
473 | { | |
474 | u32 idx = hwc->idx; | |
475 | u64 val; | |
476 | ||
477 | val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx); | |
478 | val |= HISI_PCIE_EVENT_EN; | |
479 | hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val); | |
480 | } | |
481 | ||
482 | static void hisi_pcie_pmu_disable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) | |
483 | { | |
484 | u32 idx = hwc->idx; | |
485 | u64 val; | |
486 | ||
487 | val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx); | |
488 | val &= ~HISI_PCIE_EVENT_EN; | |
489 | hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val); | |
490 | } | |
491 | ||
492 | static void hisi_pcie_pmu_enable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) | |
493 | { | |
494 | u32 idx = hwc->idx; | |
495 | ||
496 | hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 0); | |
497 | } | |
498 | ||
499 | static void hisi_pcie_pmu_disable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) | |
500 | { | |
501 | u32 idx = hwc->idx; | |
502 | ||
503 | hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 1); | |
504 | } | |
505 | ||
/* Pulse the reset bit, then return the control word to its initial state. */
static void hisi_pcie_pmu_reset_counter(struct hisi_pcie_pmu *pcie_pmu, int idx)
{
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_RESET_CNT);
	hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_INIT_SET);
}
511 | ||
/*
 * perf core .start callback: program the event config, enable counting
 * and interrupts, and (re-)arm the period.
 */
static void hisi_pcie_pmu_start(struct perf_event *event, int flags)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 prev_cnt;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	hisi_pcie_pmu_config_event_ctrl(event);
	hisi_pcie_pmu_enable_counter(pcie_pmu, hwc);
	hisi_pcie_pmu_enable_int(pcie_pmu, hwc);
	hisi_pcie_pmu_set_period(event);

	/* On reload, restore the counter value saved by the last stop. */
	if (flags & PERF_EF_RELOAD) {
		prev_cnt = local64_read(&hwc->prev_count);
		hisi_pcie_pmu_writeq(pcie_pmu, hwc->event_base, idx, prev_cnt);
	}

	perf_event_update_userpage(event);
}
537 | ||
/*
 * perf core .stop callback: sync the count, quiesce the hardware and
 * mark the event stopped/up-to-date.
 */
static void hisi_pcie_pmu_stop(struct perf_event *event, int flags)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* Fold in the final counter value before disabling anything. */
	hisi_pcie_pmu_event_update(event);
	hisi_pcie_pmu_disable_int(pcie_pmu, hwc);
	hisi_pcie_pmu_disable_counter(pcie_pmu, hwc);
	hisi_pcie_pmu_clear_event_ctrl(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	hwc->state |= PERF_HES_UPTODATE;
}
555 | ||
/*
 * perf core .add callback: claim a hardware counter (possibly shared with
 * a related event in the same group) and optionally start counting.
 */
static int hisi_pcie_pmu_add(struct perf_event *event, int flags)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	idx = hisi_pcie_pmu_get_event_idx(pcie_pmu, event);
	if (idx < 0)
		return idx;	/* -EAGAIN: all counters busy */

	hwc->idx = idx;

	/* No enabled counter found with related event, reset it */
	if (!pcie_pmu->hw_events[idx]) {
		hisi_pcie_pmu_reset_counter(pcie_pmu, idx);
		pcie_pmu->hw_events[idx] = event;
	}

	if (flags & PERF_EF_START)
		hisi_pcie_pmu_start(event, PERF_EF_RELOAD);

	return 0;
}
581 | ||
582 | static void hisi_pcie_pmu_del(struct perf_event *event, int flags) | |
583 | { | |
584 | struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); | |
585 | struct hw_perf_event *hwc = &event->hw; | |
586 | ||
587 | hisi_pcie_pmu_stop(event, PERF_EF_UPDATE); | |
588 | pcie_pmu->hw_events[hwc->idx] = NULL; | |
589 | perf_event_update_userpage(event); | |
590 | } | |
591 | ||
592 | static void hisi_pcie_pmu_enable(struct pmu *pmu) | |
593 | { | |
594 | struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu); | |
595 | int num; | |
596 | ||
597 | for (num = 0; num < HISI_PCIE_MAX_COUNTERS; num++) { | |
598 | if (pcie_pmu->hw_events[num]) | |
599 | break; | |
600 | } | |
601 | ||
602 | if (num == HISI_PCIE_MAX_COUNTERS) | |
603 | return; | |
604 | ||
605 | writel(HISI_PCIE_GLOBAL_EN, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL); | |
606 | } | |
607 | ||
/* Turn global counting off unconditionally. */
static void hisi_pcie_pmu_disable(struct pmu *pmu)
{
	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu);

	writel(HISI_PCIE_GLOBAL_NONE, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL);
}
614 | ||
/*
 * Overflow interrupt handler: for every overflowed counter, ack the
 * interrupt, fold in the counter value and re-arm the period.
 */
static irqreturn_t hisi_pcie_pmu_irq(int irq, void *data)
{
	struct hisi_pcie_pmu *pcie_pmu = data;
	irqreturn_t ret = IRQ_NONE;
	struct perf_event *event;
	u32 overflown;
	int idx;

	for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
		overflown = hisi_pcie_pmu_readl(pcie_pmu, HISI_PCIE_INT_STAT, idx);
		if (!overflown)
			continue;

		/* Clear status of interrupt. */
		hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_STAT, idx, 1);
		event = pcie_pmu->hw_events[idx];
		/* Overflow of an unclaimed counter: ack only, nothing to update. */
		if (!event)
			continue;

		hisi_pcie_pmu_event_update(event);
		hisi_pcie_pmu_set_period(event);
		ret = IRQ_HANDLED;
	}

	return ret;
}
641 | ||
642 | static int hisi_pcie_pmu_irq_register(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu) | |
643 | { | |
644 | int irq, ret; | |
645 | ||
646 | ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); | |
647 | if (ret < 0) { | |
648 | pci_err(pdev, "Failed to enable MSI vectors: %d\n", ret); | |
649 | return ret; | |
650 | } | |
651 | ||
652 | irq = pci_irq_vector(pdev, 0); | |
653 | ret = request_irq(irq, hisi_pcie_pmu_irq, IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME, | |
654 | pcie_pmu); | |
655 | if (ret) { | |
656 | pci_err(pdev, "Failed to register IRQ: %d\n", ret); | |
657 | pci_free_irq_vectors(pdev); | |
658 | return ret; | |
659 | } | |
660 | ||
661 | pcie_pmu->irq = irq; | |
662 | ||
663 | return 0; | |
664 | } | |
665 | ||
/* Tear down the overflow IRQ: free the handler, then the MSI vectors. */
static void hisi_pcie_pmu_irq_unregister(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
	free_irq(pcie_pmu->irq, pcie_pmu);
	pci_free_irq_vectors(pdev);
}
671 | ||
/*
 * cpuhp online callback: if no counting CPU is chosen yet, pick one close
 * to the device's NUMA node and steer the overflow IRQ to it.
 */
static int hisi_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);

	if (pcie_pmu->on_cpu == -1) {
		pcie_pmu->on_cpu = cpumask_local_spread(0, dev_to_node(&pcie_pmu->pdev->dev));
		WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(pcie_pmu->on_cpu)));
	}

	return 0;
}
683 | ||
/*
 * cpuhp offline callback: if the outgoing CPU owns the PMU, migrate the
 * perf context and IRQ affinity to another online CPU, preferring the
 * device's NUMA node.
 */
static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
	unsigned int target;
	int numa_node;

	/* Nothing to do if this CPU doesn't own the PMU */
	if (pcie_pmu->on_cpu != cpu)
		return 0;

	pcie_pmu->on_cpu = -1;

	/* Choose a local CPU from all online cpus. */
	numa_node = dev_to_node(&pcie_pmu->pdev->dev);

	target = cpumask_any_and_but(cpumask_of_node(numa_node),
				     cpu_online_mask, cpu);
	/* No local candidate: fall back to any other online CPU. */
	if (target >= nr_cpu_ids)
		target = cpumask_any_but(cpu_online_mask, cpu);

	if (target >= nr_cpu_ids) {
		pci_err(pcie_pmu->pdev, "There is no CPU to set\n");
		return 0;
	}

	perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
	/* Use this CPU for event counting */
	pcie_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(target)));

	return 0;
}
716 | ||
/*
 * Named events. Codes with bit 16 set read the ext_counter of the same
 * hardware event (see EXT_COUNTER_IS_USED above).
 */
static struct attribute *hisi_pcie_pmu_events_attr[] = {
	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_latency, 0x0010),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_cnt, 0x10010),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_latency, 0x0210),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210),
	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011),
	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_flux, 0x0104),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_time, 0x10104),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804),
	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804),
	HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_flux, 0x2004),
	HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_time, 0x12004),
	HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_flux, 0x0105),
	HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_time, 0x10105),
	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405),
	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405),
	HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_flux, 0x1005),
	HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_time, 0x11005),
	NULL
};

static struct attribute_group hisi_pcie_pmu_events_group = {
	.name = "events",
	.attrs = hisi_pcie_pmu_events_attr,
};

/* Field layout descriptions matching the HISI_PCIE_PMU_FILTER_ATTR getters. */
static struct attribute *hisi_pcie_pmu_format_attr[] = {
	HISI_PCIE_PMU_FORMAT_ATTR(event, "config:0-16"),
	HISI_PCIE_PMU_FORMAT_ATTR(thr_len, "config1:0-3"),
	HISI_PCIE_PMU_FORMAT_ATTR(thr_mode, "config1:4"),
	HISI_PCIE_PMU_FORMAT_ATTR(trig_len, "config1:5-8"),
	HISI_PCIE_PMU_FORMAT_ATTR(trig_mode, "config1:9"),
	HISI_PCIE_PMU_FORMAT_ATTR(len_mode, "config1:10-11"),
	HISI_PCIE_PMU_FORMAT_ATTR(port, "config2:0-15"),
	HISI_PCIE_PMU_FORMAT_ATTR(bdf, "config2:16-31"),
	NULL
};

static const struct attribute_group hisi_pcie_pmu_format_group = {
	.name = "format",
	.attrs = hisi_pcie_pmu_format_attr,
};

static struct attribute *hisi_pcie_pmu_bus_attrs[] = {
	&dev_attr_bus.attr,
	NULL
};

static const struct attribute_group hisi_pcie_pmu_bus_attr_group = {
	.attrs = hisi_pcie_pmu_bus_attrs,
};

static struct attribute *hisi_pcie_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static const struct attribute_group hisi_pcie_pmu_cpumask_attr_group = {
	.attrs = hisi_pcie_pmu_cpumask_attrs,
};

static struct attribute *hisi_pcie_pmu_identifier_attrs[] = {
	&dev_attr_identifier.attr,
	NULL
};

static const struct attribute_group hisi_pcie_pmu_identifier_attr_group = {
	.attrs = hisi_pcie_pmu_identifier_attrs,
};

/* All sysfs groups published under the PMU device. */
static const struct attribute_group *hisi_pcie_pmu_attr_groups[] = {
	&hisi_pcie_pmu_events_group,
	&hisi_pcie_pmu_format_group,
	&hisi_pcie_pmu_bus_attr_group,
	&hisi_pcie_pmu_cpumask_attr_group,
	&hisi_pcie_pmu_identifier_attr_group,
	NULL
};
796 | ||
/*
 * Populate the hisi_pcie_pmu structure from hardware info registers and
 * fill in the perf pmu callbacks. Returns 0 or -ENOMEM.
 */
static int hisi_pcie_alloc_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
	struct hisi_pcie_reg_pair regs;
	u16 sicl_id, core_id;
	char *name;

	/* BDF window of the root ports this PMU can monitor. */
	regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_BDF);
	pcie_pmu->bdf_min = regs.lo;
	pcie_pmu->bdf_max = regs.hi;

	/* SICL/core IDs are only used to build a unique PMU name. */
	regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_INFO);
	sicl_id = regs.hi;
	core_id = regs.lo;

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_pcie%u_core%u", sicl_id, core_id);
	if (!name)
		return -ENOMEM;

	pcie_pmu->pdev = pdev;
	pcie_pmu->on_cpu = -1;
	pcie_pmu->identifier = readl(pcie_pmu->base + HISI_PCIE_REG_VERSION);
	pcie_pmu->pmu = (struct pmu) {
		.name = name,
		.module = THIS_MODULE,
		.parent = &pdev->dev,
		.event_init = hisi_pcie_pmu_event_init,
		.pmu_enable = hisi_pcie_pmu_enable,
		.pmu_disable = hisi_pcie_pmu_disable,
		.add = hisi_pcie_pmu_add,
		.del = hisi_pcie_pmu_del,
		.start = hisi_pcie_pmu_start,
		.stop = hisi_pcie_pmu_stop,
		.read = hisi_pcie_pmu_read,
		.task_ctx_nr = perf_invalid_context,
		.attr_groups = hisi_pcie_pmu_attr_groups,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
	};

	return 0;
}
837 | ||
/*
 * Full PMU bring-up: map BAR 2, read hardware info, register the IRQ,
 * the cpuhp instance and finally the perf PMU. Unwinds in reverse order
 * on failure via the goto chain.
 */
static int hisi_pcie_init_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
	int ret;

	pcie_pmu->base = pci_ioremap_bar(pdev, 2);
	if (!pcie_pmu->base) {
		pci_err(pdev, "Ioremap failed for pcie_pmu resource\n");
		return -ENOMEM;
	}

	ret = hisi_pcie_alloc_pmu(pdev, pcie_pmu);
	if (ret)
		goto err_iounmap;

	ret = hisi_pcie_pmu_irq_register(pdev, pcie_pmu);
	if (ret)
		goto err_iounmap;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
	if (ret) {
		pci_err(pdev, "Failed to register hotplug: %d\n", ret);
		goto err_irq_unregister;
	}

	ret = perf_pmu_register(&pcie_pmu->pmu, pcie_pmu->pmu.name, -1);
	if (ret) {
		pci_err(pdev, "Failed to register PCIe PMU: %d\n", ret);
		goto err_hotplug_unregister;
	}

	return ret;

err_hotplug_unregister:
	cpuhp_state_remove_instance_nocalls(
		CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);

err_irq_unregister:
	hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu);

err_iounmap:
	iounmap(pcie_pmu->base);

	return ret;
}
882 | ||
/*
 * Tear down everything set up by hisi_pcie_init_pmu(), in strict reverse
 * order: perf registration, hotplug instance, IRQ, then the BAR mapping.
 */
static void hisi_pcie_uninit_pmu(struct pci_dev *pdev)
{
	struct hisi_pcie_pmu *pcie_pmu = pci_get_drvdata(pdev);

	perf_pmu_unregister(&pcie_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(
		CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
	hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu);
	iounmap(pcie_pmu->base);
}
893 | ||
894 | static int hisi_pcie_init_dev(struct pci_dev *pdev) | |
895 | { | |
896 | int ret; | |
897 | ||
898 | ret = pcim_enable_device(pdev); | |
899 | if (ret) { | |
900 | pci_err(pdev, "Failed to enable PCI device: %d\n", ret); | |
901 | return ret; | |
902 | } | |
903 | ||
904 | ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME); | |
905 | if (ret < 0) { | |
906 | pci_err(pdev, "Failed to request PCI mem regions: %d\n", ret); | |
907 | return ret; | |
908 | } | |
909 | ||
910 | pci_set_master(pdev); | |
911 | ||
912 | return 0; | |
913 | } | |
914 | ||
915 | static int hisi_pcie_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |
916 | { | |
917 | struct hisi_pcie_pmu *pcie_pmu; | |
918 | int ret; | |
919 | ||
920 | pcie_pmu = devm_kzalloc(&pdev->dev, sizeof(*pcie_pmu), GFP_KERNEL); | |
921 | if (!pcie_pmu) | |
922 | return -ENOMEM; | |
923 | ||
924 | ret = hisi_pcie_init_dev(pdev); | |
925 | if (ret) | |
926 | return ret; | |
927 | ||
928 | ret = hisi_pcie_init_pmu(pdev, pcie_pmu); | |
929 | if (ret) | |
930 | return ret; | |
931 | ||
932 | pci_set_drvdata(pdev, pcie_pmu); | |
933 | ||
934 | return ret; | |
935 | } | |
936 | ||
/* PCI remove callback: unwind probe, then drop the stale drvdata pointer. */
static void hisi_pcie_pmu_remove(struct pci_dev *pdev)
{
	hisi_pcie_uninit_pmu(pdev);
	pci_set_drvdata(pdev, NULL);
}
942 | ||
/* Match table: HiSilicon PCIe PMU RCiEP device (device id 0xa12d). */
static const struct pci_device_id hisi_pcie_pmu_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12d) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_pcie_pmu_ids);
948 | ||
/* PCI driver glue binding the match table to probe/remove. */
static struct pci_driver hisi_pcie_pmu_driver = {
	.name = DRV_NAME,
	.id_table = hisi_pcie_pmu_ids,
	.probe = hisi_pcie_pmu_probe,
	.remove = hisi_pcie_pmu_remove,
};
955 | ||
956 | static int __init hisi_pcie_module_init(void) | |
957 | { | |
958 | int ret; | |
959 | ||
960 | ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, | |
961 | "AP_PERF_ARM_HISI_PCIE_PMU_ONLINE", | |
962 | hisi_pcie_pmu_online_cpu, | |
963 | hisi_pcie_pmu_offline_cpu); | |
964 | if (ret) { | |
965 | pr_err("Failed to setup PCIe PMU hotplug: %d\n", ret); | |
966 | return ret; | |
967 | } | |
968 | ||
969 | ret = pci_register_driver(&hisi_pcie_pmu_driver); | |
970 | if (ret) | |
971 | cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE); | |
972 | ||
973 | return ret; | |
974 | } | |
975 | module_init(hisi_pcie_module_init); | |
976 | ||
/* Module exit: unregister the driver first, then drop the hotplug state. */
static void __exit hisi_pcie_module_exit(void)
{
	pci_unregister_driver(&hisi_pcie_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE);
}
module_exit(hisi_pcie_module_exit);
983 | ||
/* Module metadata. */
MODULE_DESCRIPTION("HiSilicon PCIe PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");