// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

static DEFINE_RAW_SPINLOCK(mcip_lock);

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

/*
 * Set the mask to halt the GFRC if any online core in the SMP cluster
 * is halted. This only works on ARC HS v3.0+; on earlier versions it
 * has no effect.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
        struct bcr_generic gfrc;
        unsigned long flags;
        u32 gfrc_halt_mask;

        READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

        /*
         * The CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were only
         * added in GFRC version 0x3.
         */
        if (gfrc.ver < 0x3)
                return;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        __mcip_cmd(CMD_GFRC_READ_CORE, 0);
        gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
        gfrc_halt_mask |= BIT(cpu);
        __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

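/*
 * Enroll @cpu in the MCIP debug unit's select mask so that a halt of any
 * enrolled core (for the causes chosen below) halts the other enrolled
 * cores too, keeping the cluster coherent under a debugger.
 */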
static void mcip_update_debug_halt_mask(int cpu)
{
        u32 mcip_mask = 0;
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /*
         * mcip_mask is the same for the CMD_DEBUG_SET_SELECT and
         * CMD_DEBUG_SET_MASK commands, so read it once instead of reading
         * both CMD_DEBUG_READ_MASK and CMD_DEBUG_READ_SELECT.
         */
        __mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
        mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);

        mcip_mask |= BIT(cpu);

        __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
        /*
         * The parameter specifies the halt causes to react to:
         * STATUS32[H]/actionpoint/breakpoint/self-halt.
         * We choose all of them (0xF).
         */
        __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

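/*
 * Per-CPU bring-up hook: wire up the two IPI interrupt lines and, where
 * the hardware supports it, enroll the new core in the GFRC and debug
 * halt masks set up above.
 */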
static void mcip_setup_per_cpu(int cpu)
{
        struct mcip_bcr mp;

        READ_BCR(ARC_REG_MCIP_BCR, mp);

        smp_ipi_irq_setup(cpu, IPI_IRQ);
        smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

        /* Update the GFRC halt mask as a new CPU comes online */
        if (mp.gfrc)
                mcip_update_gfrc_halt_mask(cpu);

        /* Update the MCIP debug mask as a new CPU comes online */
        if (mp.dbg)
                mcip_update_debug_halt_mask(cpu);
}

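/*
 * Send an IPI to @cpu. ARConnect cannot interrupt the local core, so a
 * self-IPI is emulated via the core's software-triggered interrupt.
 */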
static void mcip_ipi_send(int cpu)
{
        unsigned long flags;
        int ipi_was_pending;

        /* ARConnect can only send IPIs to other cores */
        if (unlikely(cpu == raw_smp_processor_id())) {
                arc_softirq_trigger(SOFTIRQ_IRQ);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /*
         * If the receiver already has a pending interrupt, elide sending
         * this one. Linux cross-core calling works fine with concurrent
         * IPIs coalesced into one; see ipi_send_msg_one() in
         * arch/arc/kernel/smp.c.
         */
        __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
        ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
        if (!ipi_was_pending)
                __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

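/* Acknowledge and clear the IPI(s) currently pending for this core */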
static void mcip_ipi_clear(int irq)
{
        unsigned int cpu, c;
        unsigned long flags;

        if (unlikely(irq == SOFTIRQ_IRQ)) {
                arc_softirq_clear(irq);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /* Who sent the IPI? */
        __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

        cpu = read_aux_reg(ARC_REG_MCIP_READBACK);      /* 1, 2, 4, 8, ... */

        /*
         * In the rare case that multiple concurrent IPIs sent to the same
         * target were coalesced by MCIP into one asserted IRQ, @cpu can be
         * "vectored" (multiple bits set) as opposed to the typical single
         * bit, so ack every sender.
         */
        do {
                c = __ffs(cpu);                         /* 0, 1, 2, 3, ... */
                __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
                cpu &= ~(1U << c);
        } while (cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_probe_n_setup(void)
{
        struct mcip_bcr mp;

        READ_BCR(ARC_REG_MCIP_BCR, mp);

        sprintf(smp_cpuinfo_buf,
                "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
                mp.ver, mp.num_cores,
                IS_AVAIL1(mp.ipi, "IPI "),
                IS_AVAIL1(mp.idu, "IDU "),
                IS_AVAIL1(mp.dbg, "DEBUG "),
                IS_AVAIL1(mp.gfrc, "GFRC"));

        cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
}

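/* SMP hooks consumed by the generic ARC SMP code (arch/arc/kernel/smp.c) */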
struct plat_smp_ops plat_smp_ops = {
        .info           = smp_cpuinfo_buf,
        .init_early_smp = mcip_probe_n_setup,
        .init_per_cpu   = mcip_setup_per_cpu,
        .ipi_send       = mcip_ipi_send,
        .ipi_clear      = mcip_ipi_clear,
};

#endif

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  -dynamic routing (IRQ affinity)
 *  -load balancing (Round Robin interrupt distribution)
 *  -1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
        __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

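/*
 * Read-modify-write of the IDU mode register for @cmn_irq: update the
 * trigger type (level/edge) and/or the distribution mode (fixed
 * destination/Round Robin), as selected by @set_lvl and @set_distr.
 */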
static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
                         bool set_distr, unsigned int distr)
{
        union {
                unsigned int word;
                struct {
                        unsigned int distr:2, pad:2, lvl:1, pad2:27;
                };
        } data;

        data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
        if (set_distr)
                data.distr = distr;
        if (set_lvl)
                data.lvl = lvl;
        __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}

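/*
 * Mask/unmask program CMD_IDU_SET_MASK under mcip_lock. The _raw variant
 * takes a hw irq number directly so it can also be used before any
 * irq_data exists (e.g. while masking everything at probe time).
 */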
static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
        idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_ack(struct irq_data *data)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask_ack(struct irq_data *data)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
        __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

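/*
 * Program both the destination cores and the distribution mode for a
 * common interrupt, considering only the CPUs of @cpumask that are
 * actually online.
 */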
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
                     bool force)
{
        unsigned long flags;
        cpumask_t online;
        unsigned int destination_bits;
        unsigned int distribution_mode;

        /* Error out if no online CPU is left in @cpumask */
        if (!cpumask_and(&online, cpumask, cpu_online_mask))
                return -EINVAL;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        destination_bits = cpumask_bits(&online)[0];
        idu_set_dest(data->hwirq, destination_bits);

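        /*
         * A single bit in the destination mask selects a fixed target
         * core; multiple bits enable Round Robin distribution among those
         * cores (hence the ffs()/fls() "exactly one bit set" check below).
         */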
        if (ffs(destination_bits) == fls(destination_bits))
                distribution_mode = IDU_M_DISTRI_DEST;
        else
                distribution_mode = IDU_M_DISTRI_RR;

        idu_set_mode(data->hwirq, false, 0, true, distribution_mode);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);

        return IRQ_SET_MASK_OK;
}

static int idu_irq_set_type(struct irq_data *data, u32 type)
{
        unsigned long flags;

        /*
         * ARCv2 IDU HW does not support inverse polarity, so these are the
         * only interrupt types supported.
         */
        if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
                return -EINVAL;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        idu_set_mode(data->hwirq, true,
                     type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
                                                   IDU_M_TRIG_LEVEL,
                     false, 0);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);

        return 0;
}

static void idu_irq_enable(struct irq_data *data)
{
        /*
         * By default send all common interrupts to all available online
         * CPUs. The affinity of common interrupts in the IDU must be set
         * manually since in some cases the kernel will not call
         * irq_set_affinity() by itself:
         *   1. When the kernel is not configured with SMP support.
         *   2. When the kernel is configured with SMP support, but the
         *      upper interrupt controllers do not support setting the
         *      affinity and cannot propagate it to the IDU.
         */
        idu_irq_set_affinity(data, cpu_online_mask, false);
        idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
        .name                   = "MCIP IDU Intc",
        .irq_mask               = idu_irq_mask,
        .irq_unmask             = idu_irq_unmask,
        .irq_ack                = idu_irq_ack,
        .irq_mask_ack           = idu_irq_mask_ack,
        .irq_enable             = idu_irq_enable,
        .irq_set_type           = idu_irq_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity       = idu_irq_set_affinity,
#endif
};

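/*
 * First-level handler for the core IRQ lines that the IDU drives:
 * translate the core hwirq back into an IDU common irq and invoke its
 * handler inside a chained-irq section.
 */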
static void idu_cascade_isr(struct irq_desc *desc)
{
        struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
        struct irq_chip *core_chip = irq_desc_get_chip(desc);
        irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
        irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

        chained_irq_enter(core_chip, desc);
        generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
        chained_irq_exit(core_chip, desc);
}

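/*
 * IRQ_MOVE_PCNTXT lets the affinity of these interrupts be changed
 * directly from process context, without deferring the move to the next
 * interrupt arrival.
 */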
static int idu_irq_map(struct irq_domain *d, unsigned int virq,
                       irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
        irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

        return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
        .xlate  = irq_domain_xlate_onetwocell,
        .map    = idu_irq_map,
};

/*
 * Core intc irq space:
 * [16, 23]:   Statically assigned, always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0, the "C" common IRQs routed through the IDU
 * [24+C, N]:  Not statically assigned, private-per-core
 */

static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
        struct irq_domain *domain;
        int nr_irqs;
        int i, virq;
        struct mcip_bcr mp;
        struct mcip_idu_bcr idu_bcr;

        READ_BCR(ARC_REG_MCIP_BCR, mp);

        if (!mp.idu)
                panic("IDU not detected, but DeviceTree using it");

        READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
        nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

        pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

        domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

        /* Parent interrupts (core-intc) are already mapped */

        for (i = 0; i < nr_irqs; i++) {
                /* Mask all common interrupts by default */
                idu_irq_mask_raw(i);

                /*
                 * Map the parent uplink IRQs (towards the core intc):
                 * 24, 25, ... This mapping has been created already;
                 * however we redo it here to obtain the parent virq, so
                 * that the IDU cascade handler can be installed as the
                 * first-level ISR.
                 */
                virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
                BUG_ON(!virq);
                irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
        }

        __mcip_cmd(CMD_IDU_ENABLE, 0);

        return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
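
/*
 * For reference, an illustrative (not authoritative) DeviceTree fragment
 * that binds this driver; the node name and cell count are assumptions,
 * so check the snps,archs-idu-intc binding document for the exact
 * contract:
 *
 *      idu_intc: idu-interrupt-controller {
 *              compatible = "snps,archs-idu-intc";
 *              interrupt-controller;
 *              #interrupt-cells = <1>;
 *      };
 *
 * A device would then refer to IDU common irq N (core irq 24+N) with:
 *      interrupt-parent = <&idu_intc>;
 *      interrupts = <N>;
 */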