/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>

static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (WARN_ON(!gicc))
		return -EINVAL;

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (!gicc)
		return;

	gsi = gicc->performance_interrupt;
	acpi_unregister_gsi(gsi);
}

static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		armpmu_request_irq(irq, cpu);
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}

static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage
 * the affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}

int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return 0;
}

static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)