drivers/perf/arm_pmu_acpi.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>

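/*
 * Per-CPU bookkeeping for the ACPI MADT walk: the PMU instance probed
 * for each CPU (if any), and the Linux IRQ mapped from that CPU's GICC
 * performance interrupt GSI (0 when there is none).
 */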
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

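/*
 * Map this CPU's MADT GICC performance interrupt GSI to a Linux IRQ.
 * Returns the IRQ number, 0 if the GICC describes no interrupt, or a
 * negative error code.
 */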
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

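/* Undo arm_pmu_acpi_register_irq(), if a GSI was actually registered. */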
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;
	if (gsi)
		acpi_unregister_gsi(gsi);
}

#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
static struct resource spe_resources[] = {
	{
		/* irq */
		.flags		= IORESOURCE_IRQ,
	}
};

static struct platform_device spe_dev = {
	.name		= ARMV8_SPE_PDEV_NAME,
	.id		= -1,
	.resource	= spe_resources,
	.num_resources	= ARRAY_SIZE(spe_resources)
};

/*
 * For lack of a better place, hook the normal PMU MADT walk
 * and create a SPE device if we detect a recent MADT with
 * a homogeneous PPI mapping.
 */
static void arm_spe_acpi_register_device(void)
{
	int cpu, hetid, irq, ret;
	bool first = true;
	u16 gsi = 0;

	/*
	 * Sanity check all the GICC tables for the same interrupt number.
	 * For now, we only support homogeneous ACPI/SPE machines.
	 */
	for_each_possible_cpu(cpu) {
		struct acpi_madt_generic_interrupt *gicc;

		gicc = acpi_cpu_get_madt_gicc(cpu);
		if (gicc->header.length < ACPI_MADT_GICC_SPE)
			return;

		if (first) {
			gsi = gicc->spe_interrupt;
			if (!gsi)
				return;
			hetid = find_acpi_cpu_topology_hetero_id(cpu);
			first = false;
		} else if ((gsi != gicc->spe_interrupt) ||
			   (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
			pr_warn("ACPI: SPE must be homogeneous\n");
			return;
		}
	}

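	/*
	 * All CPUs share the same SPE PPI, so map the GSI once (as
	 * level-triggered, active-high) and hand the resulting IRQ to
	 * the SPE driver as the device's sole resource.
	 */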
	irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
				ACPI_ACTIVE_HIGH);
	if (irq < 0) {
		pr_warn("ACPI: SPE: Unable to register interrupt: %d\n", gsi);
		return;
	}

	spe_resources[0].start = irq;
	ret = platform_device_register(&spe_dev);
	if (ret < 0) {
		pr_warn("ACPI: SPE: Unable to register device\n");
		acpi_unregister_gsi(gsi);
	}
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif /* CONFIG_ARM_SPE_PMU */

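/*
 * Walk every possible CPU, translating and requesting its PMU GSI. On
 * failure, unwind all prior registrations, taking care to unregister
 * each shared GSI (e.g. a PPI) only once.
 */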
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		err = armpmu_request_irq(irq, cpu);
		if (err)
			goto out_err;
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}

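/*
 * Find the PMU instance already probed for another CPU with the same
 * MIDR as the current CPU, or allocate a fresh one. This runs from the
 * CPU-starting hotplug callback, hence the atomic allocation.
 */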
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
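		/*
		 * Distinct SPIs (one per CPU) are fine; a mismatch only
		 * matters when a PPI is involved on either side.
		 */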
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

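	/*
	 * Only wire the IRQ up to this CPU's hw_events if it is consistent
	 * with the IRQs already associated with this PMU's other CPUs.
	 */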
	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}

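/*
 * Called from the individual PMU drivers' ACPI probe paths: initialise
 * and register one logical PMU per distinct CPU type discovered by the
 * hotplug walk, giving each a unique "<name>_<idx>" identifier.
 */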
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return 0;
}

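/*
 * One-shot boot-time setup: register the SPE device (if the MADT
 * describes one), map and request the per-CPU PMU IRQs, then install
 * the CPU-starting hotplug callback that associates each CPU with a
 * PMU instance as it comes online.
 */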
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	arm_spe_acpi_register_device();

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)