 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 * Based on intc2.c and ipr.c
 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000 Kazumoto Kojima
 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006 Paul Mundt
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <asm/sizes.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
	 ((addr_e) << 16) | ((addr_d) << 24))
#define _INTC_SHIFT(h)	((h) & 0x1f)
#define _INTC_WIDTH(h)	(((h) >> 5) & 0xf)
#define _INTC_FN(h)	(((h) >> 9) & 0xf)
#define _INTC_MODE(h)	(((h) >> 13) & 0x7)
#define _INTC_ADDR_E(h)	(((h) >> 16) & 0xff)
#define _INTC_ADDR_D(h)	(((h) >> 24) & 0xff)
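/*
 * Editor's note (illustrative, not from the original source): a handle is
 * just these six fields packed into one word. For example, assuming a
 * hypothetical source whose enable register has index 1, whose disable
 * register has index 2, and whose mask bit is a 1-bit field at bit 5,
 * _INTC_MK(REG_FN_MODIFY_BASE, MODE_ENABLE_REG, 1, 2, 1, 5) yields
 * 0x02010a25, and _INTC_SHIFT()/_INTC_WIDTH()/_INTC_FN()/_INTC_MODE()/
 * _INTC_ADDR_E()/_INTC_ADDR_D() recover 5, 1, 5, 0, 1 and 2 respectively.
 */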
struct intc_handle_int {
struct intc_desc_int {
	struct list_head list;
	struct sys_device sysdev;
	struct intc_handle_int *prio;
	struct intc_handle_int *sense;
	unsigned int nr_sense;
	struct intc_window *window;
	unsigned int nr_windows;
static LIST_HEAD(intc_list);
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This, combined with
 * sparseirq, makes it quite trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as tying in to otherwise
 * unused irq_desc positions in the sparse array.
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static DEFINE_SPINLOCK(vector_lock);
#define IS_SMP(x)		((x).smp)
#define INTC_REG(d, x, c)	(d->reg[(x)] + ((d->smp[(x)] & 0xff) * (c)))
#define SMP_NR(d, x)		((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#define INTC_REG(d, x, c)	(d->reg[(x)])
#define SMP_NR(d, x)		1
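/*
 * Editor's note: as the SMP variants of these macros read, the low byte of
 * d->smp[] is the per-cpu register stride that gets multiplied by the cpu
 * index, and the high byte is the number of per-cpu copies of the register
 * (defaulting to 1). This presumably matches the INTC_SMP() encoding used
 * by the CPU descriptors, which is defined elsewhere (linux/sh_intc.h).
 */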
static unsigned int intc_prio_level[NR_IRQS];	/* for now */
static unsigned int default_prio_level = 2;	/* 2 - 16 */
static unsigned long ack_handle[NR_IRQS];
static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
	struct irq_chip *chip = get_irq_chip(irq);
	return container_of(chip, struct intc_desc_int, chip);
static inline unsigned int set_field(unsigned int value,
				     unsigned int field_value,
	unsigned int width = _INTC_WIDTH(handle);
	unsigned int shift = _INTC_SHIFT(handle);
	value &= ~(((1 << width) - 1) << shift);
	value |= field_value << shift;
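	/*
	 * Illustrative example (added): with a handle encoding width 2 and
	 * shift 4, set_field(0, 3, handle) clears bits 5:4 of the value and
	 * ORs in 3 << 4, returning 0x30.
	 */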
static void write_8(unsigned long addr, unsigned long h, unsigned long data)
	__raw_writeb(set_field(0, data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
static void write_16(unsigned long addr, unsigned long h, unsigned long data)
	__raw_writew(set_field(0, data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
static void write_32(unsigned long addr, unsigned long h, unsigned long data)
	__raw_writel(set_field(0, data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
	local_irq_save(flags);
	__raw_writeb(set_field(__raw_readb(addr), data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	local_irq_restore(flags);
static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
	local_irq_save(flags);
	__raw_writew(set_field(__raw_readw(addr), data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	local_irq_restore(flags);
static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
	local_irq_save(flags);
	__raw_writel(set_field(__raw_readl(addr), data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	local_irq_restore(flags);
enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };
static void (*intc_reg_fns[])(unsigned long addr,
			      unsigned long data) = {
	[REG_FN_WRITE_BASE + 0] = write_8,
	[REG_FN_WRITE_BASE + 1] = write_16,
	[REG_FN_WRITE_BASE + 3] = write_32,
	[REG_FN_MODIFY_BASE + 0] = modify_8,
	[REG_FN_MODIFY_BASE + 1] = modify_16,
	[REG_FN_MODIFY_BASE + 3] = modify_32,
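/*
 * Editor's note: callers index this table as base + (reg_width >> 3) - 1,
 * so 8-, 16- and 32-bit registers select the +0, +1 and +3 slots of each
 * group; the +2 slots (which would correspond to 24-bit registers) are
 * simply left unused.
 */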
enum {	MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
	MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
	MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
	MODE_PRIO_REG,       /* Priority value written to enable interrupt */
	MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
static void intc_mode_field(unsigned long addr,
			    unsigned long handle,
			    void (*fn)(unsigned long,
	fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
static void intc_mode_zero(unsigned long addr,
			   unsigned long handle,
			   void (*fn)(unsigned long,
static void intc_mode_prio(unsigned long addr,
			   unsigned long handle,
			   void (*fn)(unsigned long,
	fn(addr, handle, intc_prio_level[irq]);
static void (*intc_enable_fns[])(unsigned long addr,
				 unsigned long handle,
				 void (*fn)(unsigned long,
				 unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_prio,
	[MODE_PCLR_REG] = intc_mode_prio,
static void (*intc_disable_fns[])(unsigned long addr,
				  unsigned long handle,
				  void (*fn)(unsigned long,
				  unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_zero,
	[MODE_MASK_REG] = intc_mode_field,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_zero,
	[MODE_PCLR_REG] = intc_mode_field,
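/*
 * Editor's summary (added): for MODE_ENABLE_REG and MODE_DUAL_REG sources,
 * enabling writes the full field and disabling clears it (with dual-register
 * sources each write lands in its own set/clear register); MODE_MASK_REG
 * inverts that, and the MODE_PRIO_* variants write the per-irq priority
 * level to enable and zero (or all bits, for MODE_PCLR_REG) to disable.
 */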
static inline void _intc_enable(unsigned int irq, unsigned long handle)
	struct intc_desc_int *d = get_intc_desc(irq);
	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		intc_enable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
static void intc_enable(unsigned int irq)
	_intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
static void intc_disable(unsigned int irq)
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = (unsigned long)get_irq_chip_data(irq);
	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		intc_disable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
static void (*intc_enable_noprio_fns[])(unsigned long addr,
					unsigned long handle,
					void (*fn)(unsigned long,
					unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_field,
	[MODE_PCLR_REG] = intc_mode_field,
static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
	void (*fn)(unsigned long, unsigned long,
		   void (*)(unsigned long, unsigned long, unsigned long),
	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
		fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		fn = intc_disable_fns[_INTC_MODE(handle)];
		fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
static int intc_set_wake(unsigned int irq, unsigned int on)
	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
 * This is called with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
	if (!cpumask_intersects(cpumask, cpu_online_mask))
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask);
static void intc_mask_ack(unsigned int irq)
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = ack_handle[irq];
	/* read register and write zero only to the associated bit */
	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	switch (_INTC_FN(handle)) {
	case REG_FN_MODIFY_BASE + 0:	/* 8bit */
		__raw_writeb(0xff ^ set_field(0, 1, handle), addr);
	case REG_FN_MODIFY_BASE + 1:	/* 16bit */
		__raw_writew(0xffff ^ set_field(0, 1, handle), addr);
	case REG_FN_MODIFY_BASE + 3:	/* 32bit */
		__raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
	/* this doesn't scale well, but...
	 * this function should only be used for certain uncommon
	 * operations such as intc_set_priority() and intc_set_sense()
	 * and in those rare cases performance doesn't matter that much.
	 * keeping the memory footprint low is more important.
	 * one rather simple way to speed this up and still keep the
	 * memory footprint down is to make sure the array is sorted
	 * and then perform a bisect to look up the irq.
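	 *
	 * such a bisect might look roughly like this (editor's sketch,
	 * assuming hp[] were kept sorted by ->irq at registration time):
	 *
	 *	int lo = 0, hi = nr_hp;
	 *	while (lo < hi) {
	 *		int mid = lo + (hi - lo) / 2;
	 *		if (hp[mid].irq < irq)
	 *			lo = mid + 1;
	 *		else
	 *			hi = mid;
	 *	}
	 *	return (lo < nr_hp && hp[lo].irq == irq) ? hp + lo : NULL;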
	for (i = 0; i < nr_hp; i++) {
		if ((hp + i)->irq != irq)
int intc_set_priority(unsigned int irq, unsigned int prio)
	struct intc_desc_int *d = get_intc_desc(irq);
	struct intc_handle_int *ihp;
	if (!intc_prio_level[irq] || prio <= 1)
	ihp = intc_find_irq(d->prio, d->nr_prio, irq);
	if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
	intc_prio_level[irq] = prio;
	 * only set secondary masking method directly
	 * primary masking method is using intc_prio_level[irq]
	 * priority level will be set during next enable()
	if (_INTC_FN(ihp->handle) != REG_FN_ERR)
		_intc_enable(irq, ihp->handle);
#define VALID(x)	((x) | 0x80)
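/*
 * Editor's note: the 0x80 bit simply marks a table slot as populated, so
 * that VALID(0) (falling edge, hardware value 0) can be told apart from an
 * entry that was never filled in and therefore reads as 0.
 */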
static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_EDGE_FALLING] = VALID(0),
	[IRQ_TYPE_EDGE_RISING] = VALID(1),
	[IRQ_TYPE_LEVEL_LOW] = VALID(2),
	/* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
	[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
static int intc_set_sense(unsigned int irq, unsigned int type)
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
	struct intc_handle_int *ihp;
	ihp = intc_find_irq(d->sense, d->nr_sense, irq);
	addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
	intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
				       unsigned long address)
	struct intc_window *window;
	/* scan through physical windows and convert address */
	for (k = 0; k < d->nr_windows; k++) {
		window = d->window + k;
		if (address < window->phys)
		if (address >= (window->phys + window->size))
		address -= window->phys;
		address += (unsigned long)window->virt;
	/* no windows defined, register must be 1:1 mapped virt:phys */
static unsigned int __init intc_get_reg(struct intc_desc_int *d,
					unsigned long address)
	address = intc_phys_to_virt(d, address);
	for (k = 0; k < d->nr_reg; k++) {
		if (d->reg[k] == address)
static intc_enum __init intc_grp_id(struct intc_desc *desc,
	struct intc_group *g = desc->hw.groups;
	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
		g = desc->hw.groups + i;
		for (j = 0; g->enum_ids[j]; j++) {
			if (g->enum_ids[j] != enum_id)
static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;
	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;
		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)
			if (mr->set_reg && mr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;
				fn = REG_FN_MODIFY_BASE;
					mode = MODE_ENABLE_REG;
					mode = MODE_MASK_REG;
			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					(mr->reg_width - 1) - *fld_idx);
static unsigned int __init intc_mask_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
	return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);
static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;
	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;
		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)
			if (pr->set_reg && pr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_PRIO_REG;
			fn += (pr->reg_width >> 3) - 1;
			BUG_ON(n * pr->field_width > pr->reg_width);
			bit = pr->reg_width - (n * pr->field_width);
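			/*
			 * Illustrative example (added): fields are allocated
			 * from the MSB down, so the first 4-bit field of a
			 * 16-bit priority register ends up with bit == 12,
			 * i.e. it occupies bits 15:12.
			 */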
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
static unsigned int __init intc_prio_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
	ret = _intc_prio_data(desc, d, enum_id, &i, &j);
	return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);
static void __init intc_enable_disable_enum(struct intc_desc *desc,
					    struct intc_desc_int *d,
					    intc_enum enum_id, int enable)
	unsigned int i, j, data;
	/* go through and enable/disable all mask bits */
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
			intc_enable_disable(d, data, enable);
	/* go through and enable/disable all priority fields */
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
			intc_enable_disable(d, data, enable);
static unsigned int __init intc_ack_data(struct intc_desc *desc,
					 struct intc_desc_int *d,
	struct intc_mask_reg *mr = desc->hw.ack_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;
	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
		mr = desc->hw.ack_regs + i;
		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					(mr->reg_width - 1) - j);
static unsigned int __init intc_sense_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
	struct intc_sense_reg *sr = desc->hw.sense_regs;
	unsigned int i, j, fn, bit;
	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
		sr = desc->hw.sense_regs + i;
		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
			if (sr->enum_ids[j] != enum_id)
			fn = REG_FN_MODIFY_BASE;
			fn += (sr->reg_width >> 3) - 1;
			BUG_ON((j + 1) * sr->field_width > sr->reg_width);
			bit = sr->reg_width - ((j + 1) * sr->field_width);
			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
					0, sr->field_width, bit);
static void __init intc_register_irq(struct intc_desc *desc,
				     struct intc_desc_int *d,
	struct intc_handle_int *hp;
	unsigned int data[2], primary;
	 * Register the IRQ position with the global IRQ map
	set_bit(irq, intc_irq_map);
	/* Prefer single interrupt source bitmap over other combinations:
	 * 1. bitmap, single interrupt source
	 * 2. priority, single interrupt source
	 * 3. bitmap, multiple interrupt sources (groups)
	 * 4. priority, multiple interrupt sources (groups)
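	 *
	 * (Editor's note: data[0] below carries the bitmap/mask handle and
	 *  data[1] the priority handle; "primary" then selects whichever is
	 *  preferred by the ordering above, and the other one, if present,
	 *  is kept as the secondary masking method.)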
	data[0] = intc_mask_data(desc, d, enum_id, 0);
	data[1] = intc_prio_data(desc, d, enum_id, 0);
	if (!data[0] && data[1])
	if (!data[0] && !data[1])
		pr_warning("intc: missing unique irq mask for irq %d (vect 0x%04x)\n",
			   irq, irq2evt(irq));
	data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
	data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);
	BUG_ON(!data[primary]);	/* must have primary masking method */
	disable_irq_nosync(irq);
	set_irq_chip_and_handler_name(irq, &d->chip,
				      handle_level_irq, "level");
	set_irq_chip_data(irq, (void *)data[primary]);
	/* set priority level
	 * - this needs to be at least 2 for 5-bit priorities on 7780
	intc_prio_level[irq] = default_prio_level;
	/* enable secondary masking method if present */
		_intc_enable(irq, data[!primary]);
	/* add irq to d->prio list if priority is available */
		hp = d->prio + d->nr_prio;
			hp->handle = data[1];
			 * only secondary priority should access registers, so
			 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
			hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
			hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
	/* add irq to d->sense list if sense is available */
	data[0] = intc_sense_data(desc, d, enum_id);
		(d->sense + d->nr_sense)->irq = irq;
		(d->sense + d->nr_sense)->handle = data[0];
	/* irq should be disabled by default */
	if (desc->hw.ack_regs)
		ack_handle[irq] = intc_ack_data(desc, d, enum_id);
	set_irq_flags(irq, IRQF_VALID);	/* Enable IRQ on ARM systems */
static unsigned int __init save_reg(struct intc_desc_int *d,
		value = intc_phys_to_virt(d, value);
static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
	generic_handle_irq((unsigned int)get_irq_data(irq));
int __init register_intc_controller(struct intc_desc *desc)
	unsigned int i, k, smp;
	struct intc_hw_desc *hw = &desc->hw;
	struct intc_desc_int *d;
	struct resource *res;
	pr_info("intc: Registered controller '%s' with %u IRQs\n",
		desc->name, hw->nr_vectors);
	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	INIT_LIST_HEAD(&d->list);
	list_add(&d->list, &intc_list);
	if (desc->num_resources) {
		d->nr_windows = desc->num_resources;
		d->window = kzalloc(d->nr_windows * sizeof(*d->window),
		for (k = 0; k < d->nr_windows; k++) {
			res = desc->resource + k;
			WARN_ON(resource_type(res) != IORESOURCE_MEM);
			d->window[k].phys = res->start;
			d->window[k].size = resource_size(res);
			d->window[k].virt = ioremap_nocache(res->start,
			if (!d->window[k].virt)
	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
	d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
	for (i = 0; i < hw->nr_mask_regs; i++) {
		smp = IS_SMP(hw->mask_regs[i]);
		k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
		k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
		for (i = 0; i < hw->nr_prio_regs; i++) {
			smp = IS_SMP(hw->prio_regs[i]);
			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
	if (hw->sense_regs) {
		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
		for (i = 0; i < hw->nr_sense_regs; i++)
			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
	d->chip.name = desc->name;
	d->chip.mask = intc_disable;
	d->chip.unmask = intc_enable;
	d->chip.mask_ack = intc_disable;
	d->chip.enable = intc_enable;
	d->chip.disable = intc_disable;
	d->chip.shutdown = intc_disable;
	d->chip.set_type = intc_set_sense;
	d->chip.set_wake = intc_set_wake;
	d->chip.set_affinity = intc_set_affinity;
		for (i = 0; i < hw->nr_ack_regs; i++)
			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
		d->chip.mask_ack = intc_mask_ack;
	/* disable bits matching force_disable before registering irqs */
	if (desc->force_disable)
		intc_enable_disable_enum(desc, d, desc->force_disable, 0);
	/* disable bits matching force_enable before registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 0);
	BUG_ON(k > 256);	/* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
	/* register the vectors one by one */
	for (i = 0; i < hw->nr_vectors; i++) {
		struct intc_vect *vect = hw->vectors + i;
		unsigned int irq = evt2irq(vect->vect);
		struct irq_desc *irq_desc;
		irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
		if (unlikely(!irq_desc)) {
			pr_err("can't get irq_desc for %d\n", irq);
		intc_register_irq(desc, d, vect->enum_id, irq);
		for (k = i + 1; k < hw->nr_vectors; k++) {
			struct intc_vect *vect2 = hw->vectors + k;
			unsigned int irq2 = evt2irq(vect2->vect);
			if (vect->enum_id != vect2->enum_id)
			 * In the case of multi-evt handling and sparse
			 * IRQ support, each vector still needs to have
			 * its own backing irq_desc.
			irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
			if (unlikely(!irq_desc)) {
				pr_err("can't get irq_desc for %d\n", irq2);
			/* redirect this interrupt to the first one */
			set_irq_chip(irq2, &dummy_irq_chip);
			set_irq_chained_handler(irq2, intc_redirect_irq);
			set_irq_data(irq2, (void *)irq);
	/* enable bits matching force_enable after registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 1);
	for (k = 0; k < d->nr_windows; k++)
		if (d->window[k].virt)
			iounmap(d->window[k].virt);
	pr_err("unable to allocate INTC memory\n");
#ifdef CONFIG_INTC_USERIMASK
static void __iomem *uimask;
int register_intc_userimask(unsigned long addr)
	if (unlikely(uimask))
	uimask = ioremap_nocache(addr, SZ_4K);
	if (unlikely(!uimask))
	pr_info("intc: userimask support registered for levels 0 -> %d\n",
		default_prio_level - 1);
show_intc_userimask(struct sysdev_class *cls,
		    struct sysdev_class_attribute *attr, char *buf)
	return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
store_intc_userimask(struct sysdev_class *cls,
		     struct sysdev_class_attribute *attr,
		     const char *buf, size_t count)
	unsigned long level;
	level = simple_strtoul(buf, NULL, 10);
	 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
	 * these are chomped so as not to interfere with normal IRQs.
	 * Level 1 is a special case on some CPUs in that it's not
	 * directly settable, but given that USERIMASK cuts off below a
	 * certain level, we don't care about this limitation here.
	 * Level 0 on the other hand equates to user masking disabled.
	 * We use default_prio_level as a cut off so that only special
	 * case opt-in IRQs can be mangled.
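	 *
	 * (Editor's note: the level written below lands in bits 7:4, which is
	 *  also what show_intc_userimask() reads back; the 0xa5 in bits 31:24
	 *  appears to act as the key the hardware expects before accepting a
	 *  new mask level. With the default default_prio_level of 2, only
	 *  levels 0 and 1 can be set here.)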
	if (level >= default_prio_level)
	__raw_writel(0xa5 << 24 | level << 4, uimask);
static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
			 show_intc_userimask, store_intc_userimask);
show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
	struct intc_desc_int *d;
	d = container_of(dev, struct intc_desc_int, sysdev);
	return sprintf(buf, "%s\n", d->chip.name);
static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
static int intc_suspend(struct sys_device *dev, pm_message_t state)
	struct intc_desc_int *d;
	struct irq_desc *desc;
	/* get intc controller associated with this sysdev */
	d = container_of(dev, struct intc_desc_int, sysdev);
	switch (state.event) {
		if (d->state.event != PM_EVENT_FREEZE)
		for_each_irq_desc(irq, desc) {
			if (desc->handle_irq == intc_redirect_irq)
			if (desc->chip != &d->chip)
			if (desc->status & IRQ_DISABLED)
	case PM_EVENT_FREEZE:
		/* nothing has to be done */
	case PM_EVENT_SUSPEND:
		/* enable wakeup irqs belonging to this intc controller */
		for_each_irq_desc(irq, desc) {
			if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
static int intc_resume(struct sys_device *dev)
	return intc_suspend(dev, PMSG_ON);
static struct sysdev_class intc_sysdev_class = {
	.suspend = intc_suspend,
	.resume = intc_resume,
/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
	struct intc_desc_int *d;
	error = sysdev_class_register(&intc_sysdev_class);
#ifdef CONFIG_INTC_USERIMASK
	if (!error && uimask)
		error = sysdev_class_create_file(&intc_sysdev_class,
	list_for_each_entry(d, &intc_list, list) {
		d->sysdev.cls = &intc_sysdev_class;
		error = sysdev_register(&d->sysdev);
			error = sysdev_create_file(&d->sysdev,
	pr_err("intc: sysdev registration error\n");
device_initcall(register_intc_sysdevs);
 * Dynamic IRQ allocation and deallocation
unsigned int create_irq_nr(unsigned int irq_want, int node)
	unsigned int irq = 0, new;
	unsigned long flags;
	struct irq_desc *desc;
	spin_lock_irqsave(&vector_lock, flags);
	 * First try the wanted IRQ
	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
	/* .. then fall back to scanning. */
	new = find_first_zero_bit(intc_irq_map, nr_irqs);
	if (unlikely(new == nr_irqs))
	__set_bit(new, intc_irq_map);
	desc = irq_to_desc_alloc_node(new, node);
	if (unlikely(!desc)) {
		pr_err("can't get irq_desc for %d\n", new);
	desc = move_irq_desc(desc, node);
	spin_unlock_irqrestore(&vector_lock, flags);
	dynamic_irq_init(irq);
	set_irq_flags(irq, IRQF_VALID);	/* Enable IRQ on ARM systems */
int create_irq(void)
	int nid = cpu_to_node(smp_processor_id());
	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
void destroy_irq(unsigned int irq)
	unsigned long flags;
	dynamic_irq_cleanup(irq);
	spin_lock_irqsave(&vector_lock, flags);
	__clear_bit(irq, intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);
int reserve_irq_vector(unsigned int irq)
	unsigned long flags;
	spin_lock_irqsave(&vector_lock, flags);
	if (test_and_set_bit(irq, intc_irq_map))
	spin_unlock_irqrestore(&vector_lock, flags);
void reserve_irq_legacy(void)
	unsigned long flags;
	spin_lock_irqsave(&vector_lock, flags);
	j = find_first_bit(intc_irq_map, nr_irqs);
	for (i = 0; i < j; i++)
		__set_bit(i, intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);