Merge tag 'devicetree-for-6.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-block.git] / drivers / irqchip / irq-loongson-eiointc.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Loongson Extend I/O Interrupt Controller support
4  *
5  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
6  */
7
8 #define pr_fmt(fmt) "eiointc: " fmt
9
10 #include <linux/cpuhotplug.h>
11 #include <linux/interrupt.h>
12 #include <linux/irq.h>
13 #include <linux/irqchip.h>
14 #include <linux/irqdomain.h>
15 #include <linux/irqchip/chained_irq.h>
16 #include <linux/kernel.h>
17 #include <linux/syscore_ops.h>
18
/* Extended I/O interrupt controller register offsets (IOCSR address space) */
#define EIOINTC_REG_NODEMAP     0x14a0	/* vector -> node group mapping */
#define EIOINTC_REG_IPMAP       0x14c0	/* vector group -> CPU IP line mapping */
#define EIOINTC_REG_ENABLE      0x1600	/* per-vector enable bits */
#define EIOINTC_REG_BOUNCE      0x1680	/* per-vector bounce bits */
#define EIOINTC_REG_ISR         0x1800	/* per-vector interrupt status */
#define EIOINTC_REG_ROUTE       0x1c00	/* per-vector route (one byte each) */

/* 4 x 64-bit status/enable registers cover 256 vectors */
#define VEC_REG_COUNT           4
#define VEC_COUNT_PER_REG       64
#define VEC_COUNT               (VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)     ((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)     ((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE      0xffffffff

#define MAX_EIO_NODES           (NR_CPUS / CORES_PER_EIO_NODE)
34
static int nr_pics;	/* number of registered controllers; bound for eiointc_priv[] */
36
/* Per-controller state for one extended I/O interrupt controller instance */
struct eiointc_priv {
        u32                     node;		/* EIO node id from the ACPI MADT entry */
        nodemask_t              node_map;	/* EIO nodes served by this controller */
        cpumask_t               cpuspan_map;	/* CPUs spanned by node_map */
        struct fwnode_handle    *domain_handle;	/* fwnode backing the IRQ domain */
        struct irq_domain       *eiointc_domain; /* linear domain of VEC_COUNT vectors */
};
44
static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];	/* one slot per probed controller */
46
47 static void eiointc_enable(void)
48 {
49         uint64_t misc;
50
51         misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
52         misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
53         iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
54 }
55
56 static int cpu_to_eio_node(int cpu)
57 {
58         return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
59 }
60
/*
 * Program the route byte of vector @pos on every EIO node in @node_map so
 * the interrupt ends up on @cpu.
 *
 * Route registers hold one byte per vector: low nibble is a core bitmap,
 * high nibble a node id. Since IOCSR writes here are 32-bit, the byte is
 * positioned inside its aligned word (pos_off/data_byte) and data_mask
 * tells csr_any_send() which byte lane to update.
 */
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
        int i, node, cpu_node, route_node;
        unsigned char coremap;
        uint32_t pos_off, data, data_byte, data_mask;

        pos_off = pos & ~3;		/* 32-bit-aligned register offset */
        data_byte = pos & 3;		/* byte lane within that word */
        data_mask = ~BIT_MASK(data_byte) & 0xf;	/* clear all lanes except ours */

        /* Calculate node and coremap of target irq */
        cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
        coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

        for_each_online_cpu(i) {
                node = cpu_to_eio_node(i);
                if (!node_isset(node, *node_map))
                        continue;

                /* EIO node 0 is in charge of inter-node interrupt dispatch */
                route_node = (node == mnode) ? cpu_node : node;
                data = ((coremap | (route_node << 4)) << (data_byte * 8));
                csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
        }
}
86
/* Serializes route/enable register updates (set_affinity and resume paths) */
static DEFINE_RAW_SPINLOCK(affinity_lock);
88
/*
 * Move an interrupt to a single target CPU.
 *
 * The target is the first CPU in the intersection of the requested mask,
 * the online mask, and the CPUs this controller spans. The vector is
 * masked on the controller while its route byte is rewritten, then
 * unmasked, so it cannot fire mid-reroute.
 *
 * Returns IRQ_SET_MASK_OK on success, -EINVAL if the intersection is empty.
 */
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
        unsigned int cpu;
        unsigned long flags;
        uint32_t vector, regaddr;
        struct cpumask intersect_affinity;
        struct eiointc_priv *priv = d->domain->host_data;

        raw_spin_lock_irqsave(&affinity_lock, flags);

        cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
        cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);

        if (cpumask_empty(&intersect_affinity)) {
                raw_spin_unlock_irqrestore(&affinity_lock, flags);
                return -EINVAL;
        }
        cpu = cpumask_first(&intersect_affinity);

        vector = d->hwirq;
        /* 32 vectors per 32-bit enable register, registers 4 bytes apart */
        regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);

        /* Mask target vector */
        csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
                        0x0, priv->node * CORES_PER_EIO_NODE);

        /* Set route for target vector */
        eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);

        /* Unmask target vector */
        csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
                        0x0, priv->node * CORES_PER_EIO_NODE);

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        raw_spin_unlock_irqrestore(&affinity_lock, flags);

        return IRQ_SET_MASK_OK;
}
128
129 static int eiointc_index(int node)
130 {
131         int i;
132
133         for (i = 0; i < nr_pics; i++) {
134                 if (node_isset(node, eiointc_priv[i]->node_map))
135                         return i;
136         }
137
138         return -1;
139 }
140
141 static int eiointc_router_init(unsigned int cpu)
142 {
143         int i, bit;
144         uint32_t data;
145         uint32_t node = cpu_to_eio_node(cpu);
146         uint32_t index = eiointc_index(node);
147
148         if (index < 0) {
149                 pr_err("Error: invalid nodemap!\n");
150                 return -1;
151         }
152
153         if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
154                 eiointc_enable();
155
156                 for (i = 0; i < VEC_COUNT / 32; i++) {
157                         data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
158                         iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
159                 }
160
161                 for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
162                         bit = BIT(1 + index); /* Route to IP[1 + index] */
163                         data = bit | (bit << 8) | (bit << 16) | (bit << 24);
164                         iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
165                 }
166
167                 for (i = 0; i < VEC_COUNT / 4; i++) {
168                         /* Route to Node-0 Core-0 */
169                         if (index == 0)
170                                 bit = BIT(cpu_logical_map(0));
171                         else
172                                 bit = (eiointc_priv[index]->node << 4) | 1;
173
174                         data = bit | (bit << 8) | (bit << 16) | (bit << 24);
175                         iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
176                 }
177
178                 for (i = 0; i < VEC_COUNT / 32; i++) {
179                         data = 0xffffffff;
180                         iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
181                         iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
182                 }
183         }
184
185         return 0;
186 }
187
/*
 * Chained handler for the cascade interrupt from the eiointc.
 *
 * Scans the four 64-bit ISR words, writes the pending mask back to the
 * ISR register (presumably write-1-to-clear acknowledgment — confirm
 * against the hardware manual), then demultiplexes each set bit into the
 * controller's linear domain. If no bit was pending the interrupt is
 * accounted as spurious.
 */
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
        int i;
        u64 pending;
        bool handled = false;
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct eiointc_priv *priv = irq_desc_get_handler_data(desc);

        chained_irq_enter(chip, desc);

        for (i = 0; i < VEC_REG_COUNT; i++) {
                pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
                iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
                while (pending) {
                        int bit = __ffs(pending);
                        int irq = bit + VEC_COUNT_PER_REG * i;

                        generic_handle_domain_irq(priv->eiointc_domain, irq);
                        pending &= ~BIT(bit);
                        handled = true;
                }
        }

        if (!handled)
                spurious_interrupt();

        chained_irq_exit(chip, desc);
}
216
/*
 * No per-IRQ ack/mask/unmask register sequence is performed here: pending
 * state is cleared in eiointc_irq_dispatch() and vectors are only masked
 * transiently while rerouting in eiointc_set_irq_affinity().
 * NOTE(review): these empty callbacks presumably exist so the flow
 * handler's chip->irq_ack/mask/unmask calls have valid targets — confirm.
 */
static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}
228
/* irq_chip for vectors in the eiointc domain; only set_affinity does work */
static struct irq_chip eiointc_irq_chip = {
        .name                   = "EIOINTC",
        .irq_ack                = eiointc_ack_irq,
        .irq_mask               = eiointc_mask_irq,
        .irq_unmask             = eiointc_unmask_irq,
        .irq_set_affinity       = eiointc_set_irq_affinity,
};
236
237 static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
238                                 unsigned int nr_irqs, void *arg)
239 {
240         int ret;
241         unsigned int i, type;
242         unsigned long hwirq = 0;
243         struct eiointc *priv = domain->host_data;
244
245         ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
246         if (ret)
247                 return ret;
248
249         for (i = 0; i < nr_irqs; i++) {
250                 irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
251                                         priv, handle_edge_irq, NULL, NULL);
252         }
253
254         return 0;
255 }
256
257 static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
258                                 unsigned int nr_irqs)
259 {
260         int i;
261
262         for (i = 0; i < nr_irqs; i++) {
263                 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
264
265                 irq_set_handler(virq + i, NULL);
266                 irq_domain_reset_irq_data(d);
267         }
268 }
269
/* Domain ops: one-cell translation, alloc/free defined above */
static const struct irq_domain_ops eiointc_domain_ops = {
        .translate      = irq_domain_translate_onecell,
        .alloc          = eiointc_domain_alloc,
        .free           = eiointc_domain_free,
};
275
276 static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
277 {
278         int i;
279
280         for (i = 0; i < MAX_IO_PICS; i++) {
281                 if (node == vec_group[i].node) {
282                         vec_group[i].parent = parent;
283                         return;
284                 }
285         }
286 }
287
288 static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
289 {
290         int i;
291
292         for (i = 0; i < MAX_IO_PICS; i++) {
293                 if (node == vec_group[i].node)
294                         return vec_group[i].parent;
295         }
296         return NULL;
297 }
298
/* Nothing to save: router state is fully reprogrammed in eiointc_resume(). */
static int eiointc_suspend(void)
{
        return 0;
}
303
/*
 * Restore controller state after system resume.
 *
 * Reprograms the router registers via eiointc_router_init(0), then
 * replays the affinity of every mapped vector on every controller so the
 * per-vector routes lost over suspend are rebuilt. Only descriptors with
 * a real flow handler installed are touched.
 */
static void eiointc_resume(void)
{
        int i, j;
        struct irq_desc *desc;
        struct irq_data *irq_data;

        eiointc_router_init(0);

        for (i = 0; i < nr_pics; i++) {
                for (j = 0; j < VEC_COUNT; j++) {
                        desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j);
                        if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
                                raw_spin_lock(&desc->lock);
                                irq_data = &desc->irq_data;
                                /* force=0: reuse the recorded affinity mask */
                                eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
                                raw_spin_unlock(&desc->lock);
                        }
                }
        }
}
324
/* Registered once (by the first probed instance) in eiointc_acpi_init() */
static struct syscore_ops eiointc_syscore_ops = {
        .suspend = eiointc_suspend,
        .resume = eiointc_resume,
};
329
330 static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
331                                         const unsigned long end)
332 {
333         struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
334         unsigned int node = (pchpic_entry->address >> 44) & 0xf;
335         struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);
336
337         if (parent)
338                 return pch_pic_acpi_init(parent, pchpic_entry);
339
340         return 0;
341 }
342
343 static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
344                                         const unsigned long end)
345 {
346         struct irq_domain *parent;
347         struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
348         int node;
349
350         if (cpu_has_flatmode)
351                 node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
352         else
353                 node = eiointc_priv[nr_pics - 1]->node;
354
355         parent = acpi_get_vec_parent(node, msi_group);
356
357         if (parent)
358                 return pch_msi_acpi_init(parent, pchmsi_entry);
359
360         return 0;
361 }
362
363 static int __init acpi_cascade_irqdomain_init(void)
364 {
365         int r;
366
367         r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
368         if (r < 0)
369                 return r;
370
371         r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
372         if (r < 0)
373                 return r;
374
375         return 0;
376 }
377
/*
 * Probe one extended I/O interrupt controller from its ACPI MADT entry.
 *
 * Allocates the per-controller state, derives the node/CPU span from the
 * MADT node_map (all ones when node_map is 0), creates the linear IRQ
 * domain of VEC_COUNT vectors, programs the router registers on the boot
 * CPU, and chains this controller behind @parent via the cascade vector.
 * The first instance additionally registers the shared syscore ops and
 * the CPU hotplug "starting" callback. Finally wires up the downstream
 * PCH PIC/MSI domains by walking the MADT.
 *
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): irq_create_mapping()'s result is not checked; a failed
 * mapping (0) would chain the handler onto virq 0 — verify. The
 * out_free_handle path also reports -ENOMEM for any domain-creation
 * failure.
 */
int __init eiointc_acpi_init(struct irq_domain *parent,
                                     struct acpi_madt_eio_pic *acpi_eiointc)
{
        int i, ret, parent_irq;
        unsigned long node_map;
        struct eiointc_priv *priv;
        int node;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
                                                               acpi_eiointc->node);
        if (!priv->domain_handle) {
                pr_err("Unable to allocate domain handle\n");
                goto out_free_priv;
        }

        priv->node = acpi_eiointc->node;
        /* node_map == 0 means "all nodes" */
        node_map = acpi_eiointc->node_map ? : -1ULL;

        for_each_possible_cpu(i) {
                if (node_map & (1ULL << cpu_to_eio_node(i))) {
                        node_set(cpu_to_eio_node(i), priv->node_map);
                        cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
                }
        }

        /* Setup IRQ domain */
        priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
                                        &eiointc_domain_ops, priv);
        if (!priv->eiointc_domain) {
                pr_err("loongson-eiointc: cannot add IRQ domain\n");
                goto out_free_handle;
        }

        eiointc_priv[nr_pics++] = priv;

        eiointc_router_init(0);

        parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
        irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);

        /* Shared hooks registered once, by the first instance only */
        if (nr_pics == 1) {
                register_syscore_ops(&eiointc_syscore_ops);
                cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
                                  "irqchip/loongarch/intc:starting",
                                  eiointc_router_init, NULL);
        }

        if (cpu_has_flatmode)
                node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
        else
                node = acpi_eiointc->node;
        acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
        acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);
        ret = acpi_cascade_irqdomain_init();

        return ret;

out_free_handle:
        irq_domain_free_fwnode(priv->domain_handle);
        priv->domain_handle = NULL;
out_free_priv:
        kfree(priv);

        return -ENOMEM;
}