drivers/irqchip/irq-sifive-plic.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0' is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES             1024
#define MAX_CONTEXTS            15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE           0
#define PRIORITY_PER_ID         4
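
/*
 * For illustration (derived from the constants above): the priority register
 * for interrupt source N lives at PRIORITY_BASE + N * PRIORITY_PER_ID, so
 * source 10 is configured at offset 0x28 from the PLIC base.  plic_irq_toggle()
 * below simply writes 0 or 1 there to disable or enable that source.
 */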

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE             0x2000
#define ENABLE_PER_HART         0x80
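
/*
 * Worked example (from the layout above): the enable bits for hart context 2
 * start at ENABLE_BASE + 2 * ENABLE_PER_HART = 0x2100.  plic_toggle() then
 * picks 32-bit word hwirq / 32 and bit hwirq % 32, so source 33 is bit 1 of
 * the word at offset 0x2104.
 */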

/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there are only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE            0x200000
#define CONTEXT_PER_HART        0x1000
#define CONTEXT_THRESHOLD       0x00
#define CONTEXT_CLAIM           0x04
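
/*
 * Worked example (from the layout above): context 1 has its threshold register
 * at CONTEXT_BASE + 1 * CONTEXT_PER_HART + CONTEXT_THRESHOLD = 0x201000 and
 * its claim/complete register at 0x201004.  These are the offsets reached via
 * handler->hart_base further down.
 */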

#define PLIC_DISABLE_THRESHOLD          0x7
#define PLIC_ENABLE_THRESHOLD           0

struct plic_priv {
        struct cpumask lmask;
        struct irq_domain *irqdomain;
        void __iomem *regs;
};

struct plic_handler {
        bool                    present;
        void __iomem            *hart_base;
        /*
         * Protect mask operations on the registers given that we can't
         * assume atomic memory operations work on them.
         */
        raw_spinlock_t          enable_lock;
        void __iomem            *enable_base;
        struct plic_priv        *priv;
};
static bool plic_cpuhp_setup_done;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static inline void plic_toggle(struct plic_handler *handler,
                               int hwirq, int enable)
{
        u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
        u32 hwirq_mask = 1 << (hwirq % 32);

        raw_spin_lock(&handler->enable_lock);
        if (enable)
                writel(readl(reg) | hwirq_mask, reg);
        else
                writel(readl(reg) & ~hwirq_mask, reg);
        raw_spin_unlock(&handler->enable_lock);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
                                   struct irq_data *d, int enable)
{
        int cpu;
        struct plic_priv *priv = irq_get_chip_data(d->irq);

        writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
        for_each_cpu(cpu, mask) {
                struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

                if (handler->present &&
                    cpumask_test_cpu(cpu, &handler->priv->lmask))
                        plic_toggle(handler, d->hwirq, enable);
        }
}

static void plic_irq_unmask(struct irq_data *d)
{
        struct cpumask amask;
        unsigned int cpu;
        struct plic_priv *priv = irq_get_chip_data(d->irq);

        cpumask_and(&amask, &priv->lmask, cpu_online_mask);
        cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
                              &amask);
        if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
                return;
        plic_irq_toggle(cpumask_of(cpu), d, 1);
}

static void plic_irq_mask(struct irq_data *d)
{
        struct plic_priv *priv = irq_get_chip_data(d->irq);

        plic_irq_toggle(&priv->lmask, d, 0);
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
                             const struct cpumask *mask_val, bool force)
{
        unsigned int cpu;
        struct cpumask amask;
        struct plic_priv *priv = irq_get_chip_data(d->irq);

        cpumask_and(&amask, &priv->lmask, mask_val);

        if (force)
                cpu = cpumask_first(&amask);
        else
                cpu = cpumask_any_and(&amask, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        plic_irq_toggle(&priv->lmask, d, 0);
        plic_irq_toggle(cpumask_of(cpu), d, 1);

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK_DONE;
}
#endif

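/*
 * Note on plic_set_affinity() above: the source is first disabled on every
 * hart in this PLIC's mask and then enabled on exactly one target CPU.
 * Keeping a source enabled on a single hart gives a predictable effective
 * affinity; with several harts enabled, whichever hart claimed the interrupt
 * first would service it.
 */
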
static void plic_irq_eoi(struct irq_data *d)
{
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

        writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
}

static struct irq_chip plic_chip = {
        .name           = "SiFive PLIC",
        .irq_mask       = plic_irq_mask,
        .irq_unmask     = plic_irq_unmask,
        .irq_eoi        = plic_irq_eoi,
#ifdef CONFIG_SMP
        .irq_set_affinity = plic_set_affinity,
#endif
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
{
        struct plic_priv *priv = d->host_data;

        irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
                            handle_fasteoi_irq, NULL, NULL);
        irq_set_noprobe(irq);
        irq_set_affinity(irq, &priv->lmask);
        return 0;
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *arg)
{
        int i, ret;
        irq_hw_number_t hwirq;
        unsigned int type;
        struct irq_fwspec *fwspec = arg;

        ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++) {
                ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
                if (ret)
                        return ret;
        }

        return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
        .translate      = irq_domain_translate_onecell,
        .alloc          = plic_irq_domain_alloc,
        .free           = irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct pt_regs *regs)
{
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
        void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
        irq_hw_number_t hwirq;

        WARN_ON_ONCE(!handler->present);

        csr_clear(CSR_IE, IE_EIE);
        while ((hwirq = readl(claim))) {
                int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);

                if (unlikely(irq <= 0))
                        pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
                                        hwirq);
                else
                        generic_handle_irq(irq);
        }
        csr_set(CSR_IE, IE_EIE);
}
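
/*
 * To connect the two-step comment above with the code: plic_handle_irq()
 * performs the claim (the readl() of the claim register), while the matching
 * completion is not done here but in plic_irq_eoi(), which handle_fasteoi_irq()
 * invokes once the handler chain has run and which writes the same hwirq back
 * to CONTEXT_CLAIM.
 */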

/*
 * Walk up the DT tree until we find an active RISC-V core (HART) node and
 * extract the hart ID from it.
 */
static int plic_find_hart_id(struct device_node *node)
{
        for (; node; node = node->parent) {
                if (of_device_is_compatible(node, "riscv"))
                        return riscv_of_processor_hartid(node);
        }

        return -1;
}

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
        /* priority must be > threshold to trigger an interrupt */
        writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
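
/*
 * Note (an observation, not new behaviour): because every enabled source is
 * programmed with priority 1, a threshold of PLIC_ENABLE_THRESHOLD (0) lets
 * all of them through, while PLIC_DISABLE_THRESHOLD (7) is high enough to
 * block them all, which is how plic_dying_cpu() quiesces an offlining CPU.
 */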

static int plic_dying_cpu(unsigned int cpu)
{
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

        csr_clear(CSR_IE, IE_EIE);
        plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);

        return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

        csr_set(CSR_IE, IE_EIE);
        plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

        return 0;
}

static int __init plic_init(struct device_node *node,
                            struct device_node *parent)
{
        int error = 0, nr_contexts, nr_handlers = 0, i;
        u32 nr_irqs;
        struct plic_priv *priv;
        struct plic_handler *handler;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->regs = of_iomap(node, 0);
        if (WARN_ON(!priv->regs)) {
                error = -EIO;
                goto out_free_priv;
        }

        error = -EINVAL;
        of_property_read_u32(node, "riscv,ndev", &nr_irqs);
        if (WARN_ON(!nr_irqs))
                goto out_iounmap;

        nr_contexts = of_irq_count(node);
        if (WARN_ON(!nr_contexts))
                goto out_iounmap;

        error = -ENOMEM;
        priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
                                                &plic_irqdomain_ops, priv);
        if (WARN_ON(!priv->irqdomain))
                goto out_iounmap;

6adfe8d2 316 for (i = 0; i < nr_contexts; i++) {
8237f8bc 317 struct of_phandle_args parent;
8237f8bc 318 irq_hw_number_t hwirq;
f99fb607 319 int cpu, hartid;
8237f8bc
CH
320
321 if (of_irq_parse_one(node, i, &parent)) {
322 pr_err("failed to parse parent for context %d.\n", i);
323 continue;
324 }
325
a4c3733d
CH
326 /*
327 * Skip contexts other than external interrupts for our
328 * privilege level.
329 */
2f3035da 330 if (parent.args[0] != RV_IRQ_EXT)
8237f8bc
CH
331 continue;
332
f99fb607
AP
333 hartid = plic_find_hart_id(parent.np);
334 if (hartid < 0) {
8237f8bc
CH
335 pr_warn("failed to parse hart ID for context %d.\n", i);
336 continue;
337 }
338
f99fb607 339 cpu = riscv_hartid_to_cpuid(hartid);
fc03acae
AP
340 if (cpu < 0) {
341 pr_warn("Invalid cpuid for context %d\n", i);
342 continue;
343 }
344
9ce06497
CH
345 /*
346 * When running in M-mode we need to ignore the S-mode handler.
347 * Here we assume it always comes later, but that might be a
348 * little fragile.
349 */
8237f8bc 350 handler = per_cpu_ptr(&plic_handlers, cpu);
3fecb5aa
AP
351 if (handler->present) {
352 pr_warn("handler already present for context %d.\n", i);
ccbe80ba 353 plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
9ce06497 354 goto done;
3fecb5aa
AP
355 }
356
f1ad1133 357 cpumask_set_cpu(cpu, &priv->lmask);
8237f8bc 358 handler->present = true;
86c7cbf1 359 handler->hart_base =
f1ad1133 360 priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
86c7cbf1
AP
361 raw_spin_lock_init(&handler->enable_lock);
362 handler->enable_base =
f1ad1133
AP
363 priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
364 handler->priv = priv;
9ce06497 365done:
8237f8bc 366 for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
86c7cbf1 367 plic_toggle(handler, hwirq, 0);
6adfe8d2 368 nr_handlers++;
8237f8bc
CH
369 }
370
        /*
         * We can have multiple PLIC instances, so set up the cpuhp state only
         * when a context handler for the current/boot CPU is present.
         */
        handler = this_cpu_ptr(&plic_handlers);
        if (handler->present && !plic_cpuhp_setup_done) {
                cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
                                  "irqchip/sifive/plic:starting",
                                  plic_starting_cpu, plic_dying_cpu);
                plic_cpuhp_setup_done = true;
        }

        pr_info("%pOFP: mapped %d interrupts with %d handlers for"
                " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
        set_handle_irq(plic_handle_irq);
        return 0;

out_iounmap:
        iounmap(priv->regs);
out_free_priv:
        kfree(priv);
        return error;
}

IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
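
/*
 * For reference, a device tree node matched by the compatibles above typically
 * looks like the fragment below.  This is only an illustrative sketch: the
 * unit address, register size, riscv,ndev count and the &cpuN_intc phandles
 * are made up, and the per-context entries in interrupts-extended
 * (9 = S-mode external, 11 = M-mode external) depend on the platform.
 *
 *      plic: interrupt-controller@c000000 {
 *              compatible = "sifive,plic-1.0.0";
 *              reg = <0xc000000 0x4000000>;
 *              interrupt-controller;
 *              #interrupt-cells = <1>;
 *              riscv,ndev = <53>;
 *              interrupts-extended = <&cpu0_intc 11>,
 *                                    <&cpu1_intc 11>, <&cpu1_intc 9>;
 *      };
 */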