/*
 * Xtensa MX interrupt distributor
 *
 * Copyright (C) 2002 - 2013 Tensilica, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of.h>

#include <asm/mxregs.h>

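/*
 * As used below: the first HW_IRQ_IPI_COUNT hwirqs are reserved for IPIs,
 * external IRQs numbered HW_IRQ_MX_BASE and above are routed through the MX
 * distributor, and HW_IRQ_EXTERN_BASE is the offset applied when translating
 * external interrupt specifiers from the device tree.
 */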
#define HW_IRQ_IPI_COUNT 2
#define HW_IRQ_MX_BASE 2
#define HW_IRQ_EXTERN_BASE 3

static DEFINE_PER_CPU(unsigned int, cached_irq_mask);

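/*
 * Map a hwirq into the domain: the first HW_IRQ_IPI_COUNT hwirqs get a
 * per-CPU "ipi" handler; everything else is marked single-target and
 * handed on to the common xtensa_irq_map().
 */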
static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
                irq_hw_number_t hw)
{
        if (hw < HW_IRQ_IPI_COUNT) {
                struct irq_chip *irq_chip = d->host_data;
                irq_set_chip_and_handler_name(irq, irq_chip,
                                handle_percpu_irq, "ipi");
                irq_set_status_flags(irq, IRQ_LEVEL);
                return 0;
        }
        irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
        return xtensa_irq_map(d, irq, hw);
}

/*
 * Device Tree IRQ specifier translation function which works with one or
 * two cell bindings. First cell value maps directly to the hwirq number.
 * Second cell if present specifies whether hwirq number is external (1) or
 * internal (0).
 */
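/*
 * For example, a two-cell specifier of <5 1> selects external IRQ 5, while
 * <2 0> selects internal IRQ 2 (values here are purely illustrative).
 */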
static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
                struct device_node *ctrlr,
                const u32 *intspec, unsigned int intsize,
                unsigned long *out_hwirq, unsigned int *out_type)
{
        return xtensa_irq_domain_xlate(intspec, intsize,
                        intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
                        out_hwirq, out_type);
}

static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
        .xlate = xtensa_mx_irq_domain_xlate,
        .map = xtensa_mx_irq_map,
};

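/*
 * Per-CPU interrupt setup: seed this CPU's cached mask with all external
 * edge and level interrupts and write it to the INTENABLE special register.
 */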
void secondary_init_irq(void)
{
        __this_cpu_write(cached_irq_mask,
                        XCHAL_INTTYPE_MASK_EXTERN_EDGE |
                        XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
        xtensa_set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
                        XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
}

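/*
 * Mask an interrupt: external IRQs handled by the MX distributor are
 * disabled through its MIENG register; anything else is cleared from this
 * CPU's cached INTENABLE mask.
 */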
static void xtensa_mx_irq_mask(struct irq_data *d)
{
        unsigned int mask = 1u << d->hwirq;

        if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
                    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
                unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);

                if (ext_irq >= HW_IRQ_MX_BASE) {
                        set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG);
                        return;
                }
        }
        mask = __this_cpu_read(cached_irq_mask) & ~mask;
        __this_cpu_write(cached_irq_mask, mask);
        xtensa_set_sr(mask, intenable);
}

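/*
 * Unmask an interrupt: the mirror image of xtensa_mx_irq_mask(), using the
 * MIENGSET register for MX-routed external IRQs and restoring the bit in
 * the cached INTENABLE mask otherwise.
 */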
static void xtensa_mx_irq_unmask(struct irq_data *d)
{
        unsigned int mask = 1u << d->hwirq;

        if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
                    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
                unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);

                if (ext_irq >= HW_IRQ_MX_BASE) {
                        set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET);
                        return;
                }
        }
        mask |= __this_cpu_read(cached_irq_mask);
        __this_cpu_write(cached_irq_mask, mask);
        xtensa_set_sr(mask, intenable);
}

static void xtensa_mx_irq_enable(struct irq_data *d)
{
        xtensa_mx_irq_unmask(d);
}

static void xtensa_mx_irq_disable(struct irq_data *d)
{
        xtensa_mx_irq_mask(d);
}

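/* Acknowledge an interrupt by clearing its pending bit via INTCLEAR. */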
static void xtensa_mx_irq_ack(struct irq_data *d)
{
        xtensa_set_sr(1 << d->hwirq, intclear);
}

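/*
 * Retrigger is only meaningful for software interrupts, which can be
 * re-raised through INTSET; warn and report failure for any other type.
 */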
static int xtensa_mx_irq_retrigger(struct irq_data *d)
{
        unsigned int mask = 1u << d->hwirq;

        if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
                return 0;
        xtensa_set_sr(mask, intset);
        return 1;
}

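/*
 * Route an MX-distributed interrupt to a single online CPU from the
 * requested mask via the MIROUT register and record the effective affinity.
 */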
static int xtensa_mx_irq_set_affinity(struct irq_data *d,
                const struct cpumask *dest, bool force)
{
        int cpu = cpumask_any_and(dest, cpu_online_mask);
        unsigned mask = 1u << cpu;

        set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return 0;
}

static struct irq_chip xtensa_mx_irq_chip = {
        .name = "xtensa-mx",
        .irq_enable = xtensa_mx_irq_enable,
        .irq_disable = xtensa_mx_irq_disable,
        .irq_mask = xtensa_mx_irq_mask,
        .irq_unmask = xtensa_mx_irq_unmask,
        .irq_ack = xtensa_mx_irq_ack,
        .irq_retrigger = xtensa_mx_irq_retrigger,
        .irq_set_affinity = xtensa_mx_irq_set_affinity,
};

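/*
 * Legacy (non-DT) probe path: register a fixed-size IRQ domain, make it the
 * default host and initialise this CPU's interrupt state.
 */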
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
        struct irq_domain *root_domain =
                irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
                                &xtensa_mx_irq_domain_ops,
                                &xtensa_mx_irq_chip);
        irq_set_default_host(root_domain);
        secondary_init_irq();
        return 0;
}

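/*
 * Device tree probe path: same as the legacy path, but with a linear IRQ
 * domain attached to the "cdns,xtensa-mx" node.
 */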
static int __init xtensa_mx_init(struct device_node *np,
                struct device_node *interrupt_parent)
{
        struct irq_domain *root_domain =
                irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
                                &xtensa_mx_irq_chip);
        irq_set_default_host(root_domain);
        secondary_init_irq();
        return 0;
}
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);