/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2012 Cavium, Inc.
 */

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
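
/*
 * Reverse map from (CIU line, bit) to the Linux irq number.  The
 * dispatch handlers use it to find the irq for a pending CIU source;
 * a value of zero means the slot is unmapped.
 */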
static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
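
/*
 * Per-irq chip data: a pointer-sized value that packs the CIU line,
 * the bit within that line and, for GPIO irqs, the GPIO line number.
 */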
union octeon_ciu_chip_data {
	void *p;
	unsigned long l;
	struct {
		unsigned long line:6;
		unsigned long bit:6;
		unsigned long gpio_line:6;
	} s;
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	unsigned int bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
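
/*
 * Install the chip and flow handler for an irq and record the
 * (line, bit) -> irq mapping so the dispatch code can do the
 * reverse lookup.
 */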
static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				       struct irq_chip *chip,
				       irq_flow_handler_t handler)
{
	union octeon_ciu_chip_data cd;

	irq_set_chip_and_handler(irq, chip, handler);

	cd.l = 0;
	cd.s.line = line;
	cd.s.bit = bit;
	cd.s.gpio_line = gpio_line;

	irq_set_chip_data(irq, cd.p);
	octeon_irq_ciu_to_irq[line][bit] = irq;
}

static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					 int irq, int line, int bit)
{
	irq_domain_associate(domain, irq, line << 6 | bit);
}

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}

static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);
}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = true;
}

static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}
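
/*
 * Pick the CPU that should receive the next interrupt: round-robin
 * through the online CPUs in the affinity mask, falling back to the
 * current CPU when the mask is empty.
 */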
static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
	int cpu;
	int weight = cpumask_weight(data->affinity);

	if (weight > 1) {
		cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
	} else {
		cpu = smp_processor_id();
	}
	return cpu;
#else
	return smp_processor_id();
#endif
}
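
/*
 * The pre-W1S/W1C chips have read-modify-write EN registers, so each
 * CPU keeps a per-cpu mirror of its EN0/EN1 state plus a spinlock
 * that serializes updates to the mirror and the hardware register.
 */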
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		__clear_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		__clear_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock;

	cd.p = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd.s.line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd.s.line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock;

	cd.p = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd.s.line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd.s.line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd.s.line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}
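
/*
 * Program GPIO_BIT_CFG for the line from the Linux trigger type:
 * int_type selects edge vs. level and rx_xor inverts the input for
 * the falling-edge/active-low variants.
 */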
static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	union octeon_ciu_chip_data cd;
	u32 t = irqd_get_trigger_type(data);

	cd.p = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 ns glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64);
}

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;
	u64 mask;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}
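
/*
 * Choose the flow handler at dispatch time: edge-triggered GPIO irqs
 * need handle_edge_irq() so that an edge arriving while the handler
 * runs is latched and replayed instead of being lost.
 */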
static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
{
	if (irqd_get_trigger_type(irq_desc_get_irq_data(desc)) & IRQ_TYPE_EDGE_BOTH)
		handle_edge_irq(irq, desc);
	else
		handle_level_irq(irq, desc);
}

#ifdef CONFIG_SMP

static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	if (!cpumask_test_cpu(cpu, data->affinity))
		return;

	if (cpumask_weight(data->affinity) > 1) {
		/*
		 * It has multi CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, data->affinity);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	__irq_set_affinity_locked(data, &new_affinity);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd.p = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd.s.line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			__set_bit(cd.s.bit, pen);
		} else {
			__clear_bit(cd.s.bit, pen);
		}

		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd.s.line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	union octeon_ciu_chip_data cd;

	if (!enable_one)
		return 0;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd.s.bit;

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};
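
/*
 * Classify a CIU source as edge- or level-triggered so the map
 * functions can pick the matching flow handler.
 */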
static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49:	/* GMX DRP */
		case 50:	/* IPD_DRP */
		case 52 ... 55:	/* Timers */
		case 58:	/* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else if (line == 1)
		switch (bit) {
		case 17:	/* USB */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};

static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (d->of_node != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
		       node->name,
		       trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}

static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu > 1 || bit > 63)
		return -EINVAL;

	/* These are the GPIO lines */
	if (ciu == 0 && bit >= 16 && bit < 32)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_gpio_chip;

static bool octeon_irq_virq_in_range(unsigned int virq)
{
	/* We cannot let it overflow the mapping array. */
	if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
		return true;

	WARN_ONCE(true, "virq out of range %u.\n", virq);
	return false;
}

static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   octeon_irq_ciu_chip,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   octeon_irq_ciu_chip,
					   handle_level_irq);

	return 0;
}

static int octeon_irq_gpio_map_common(struct irq_domain *d,
				      unsigned int virq, irq_hw_number_t hw,
				      int line_limit, struct irq_chip *chip)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	hw += gpiod->base_hwirq;
	line = hw >> 6;
	bit = hw & 63;
	if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				   chip, octeon_irq_handle_gpio);
	return 0;
}

static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip);
}

static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.xlate = octeon_irq_gpio_xlat,
};
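
/*
 * Low level dispatch for the two CIU sum lines: AND the SUM register
 * with this CPU's enable mirror and service the highest set bit.
 */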
static void octeon_irq_ip2_ciu(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip3_ciu(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static bool octeon_irq_use_ip4;

static void __cpuinit octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void __cpuinitdata (*octeon_irq_setup_secondary)(void);

void __cpuinit octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}

static void __cpuinit octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}

static void __cpuinit octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();

	__get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
	__get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;
	wmb();
	raw_spin_lock_init(&__get_cpu_var(octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later.  Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}

static void octeon_irq_init_ciu2_percpu(void)
{
	u64 regx, ipx;
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	/*
	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later.  Read the SUM register so we know the write
	 * completed.
	 *
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectively.  Use loops to clear them.
	 */
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);
	}

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}

static void __cpuinit octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}

static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}
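
/*
 * Boot-time setup for the original CIU: pick the lockless W1S/W1C
 * chip variants where the hardware has them, register the GPIO and
 * CIU irq domains from the device tree, then wire up the fixed
 * legacy mappings.
 */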
static void __init octeon_irq_init_ciu(void)
{
	unsigned int i;
	struct irq_chip *chip;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct device_node *gpio_node;
	struct device_node *ciu_node;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
	if (gpio_node) {
		struct octeon_irq_gpio_domain_data *gpiod;

		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
		if (gpiod) {
			/* gpio domain host_data is the base hwirq number. */
			gpiod->base_hwirq = 16;
			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
			of_node_put(gpio_node);
		} else
			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
	} else
		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
	if (ciu_node) {
		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
		irq_set_default_host(ciu_domain);
		of_node_put(ciu_node);
	} else
		panic("Cannot find device node for cavium,octeon-3860-ciu.");

	/* CIU_0 */
	for (i = 0; i < 16; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);

	/* CIU_1 */
	for (i = 0; i < 16; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_ack(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	union octeon_ciu_chip_data cd;

	if (!enable_one)
		return 0;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd.s.bit;

	for_each_online_cpu(cpu) {
		u64 en_addr;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
		} else {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
		}
		cvmx_write_csr(en_addr, mask);
	}

	return 0;
}
#endif

static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}

static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu2_disable_all(data);
}

static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};

static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static int octeon_irq_ciu2_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	/* Line 7 holds the GPIO lines. */
	if (ciu > 6 || bit > 63)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 3) /* MIO */
		switch (bit) {
		case 2:		/* IPD_DRP */
		case 8 ... 11:	/* Timers */
		case 48:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	else if (line == 6) /* PKT */
		switch (bit) {
		case 52 ... 53:	/* ILK_DRP */
		case 8 ... 12:	/* GMX_DRP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/* Line 7 holds the GPIO lines. */
	if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu2_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_level_irq);

	return 0;
}

static int octeon_irq_ciu2_gpio_map(struct irq_domain *d,
				    unsigned int virq, irq_hw_number_t hw)
{
	return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio);
}

static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.xlate = octeon_irq_ciu2_xlat,
};

static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = {
	.map = octeon_irq_ciu2_gpio_map,
	.xlate = octeon_irq_gpio_xlat,
};
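
/*
 * CIU2 dispatch is two-level: the per-IP SUM register says which of
 * the interrupt lines is pending, then the matching SRC register
 * says which bit on that line fired.
 */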
static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK
	 * registers can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}

static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK
	 * registers can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}

static void __init octeon_irq_init_ciu2(void)
{
	unsigned int i;
	struct device_node *gpio_node;
	struct device_node *ciu_node;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
	if (gpio_node) {
		struct octeon_irq_gpio_domain_data *gpiod;

		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
		if (gpiod) {
			/* gpio domain host_data is the base hwirq number. */
			gpiod->base_hwirq = 7 << 6;
			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod);
			of_node_put(gpio_node);
		} else
			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
	} else
		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2");
	if (ciu_node) {
		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
		irq_set_default_host(ciu_domain);
		of_node_put(ciu_node);
	} else
		panic("Cannot find device node for cavium,octeon-6880-ciu2.");

	/* CIU2 */
	for (i = 0; i < 64; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);

	for (i = 0; i < 32; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
					   &octeon_irq_chip_ciu2_wd, handle_level_irq);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}

void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		octeon_irq_init_ciu2();
	else
		octeon_irq_init_ciu();
}
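
/*
 * Top level dispatch: service IP2/IP3/IP4 through the installed
 * handlers and loop until no enabled cause bits remain pending.
 */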
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2))
			octeon_irq_ip2();
		else if (unlikely(cop0_cause & STATUSF_IP3))
			octeon_irq_ip3();
		else if (unlikely(cop0_cause & STATUSF_IP4))
			octeon_irq_ip4();
		else if (likely(cop0_cause))
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}

#ifdef CONFIG_HOTPLUG_CPU

void fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */