// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
        alloc_bootmem_cpumask_var(&irq_default_affinity);
        cpulist_parse(str, irq_default_affinity);
        /*
         * Set at least the boot cpu. We don't want to end up with
         * bug reports caused by random command line masks.
         */
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
        return 1;
}
__setup("irqaffinity=", irq_affinity_setup);

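/*
 * Illustrative note (not part of the original file): booting with e.g.
 * "irqaffinity=0-3" on the kernel command line restricts the default
 * affinity of newly allocated interrupts to CPUs 0-3; the CPU running
 * this early setup (the boot CPU) is always added back to the mask by
 * irq_affinity_setup() above.
 */
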
static void __init init_irq_default_affinity(void)
{
        if (!cpumask_available(irq_default_affinity))
                zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        if (cpumask_empty(irq_default_affinity))
                cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
                                     GFP_KERNEL, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
                                     GFP_KERNEL, node)) {
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
                free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
                          const struct cpumask *affinity)
{
        if (!affinity)
                affinity = irq_default_affinity;
        cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
        desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
                              const struct cpumask *affinity, struct module *owner)
{
        int cpu;

        desc->irq_common_data.handler_data = NULL;
        desc->irq_common_data.msi_desc = NULL;

        desc->irq_data.common = &desc->irq_common_data;
        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
        irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->tot_count = 0;
        desc->name = NULL;
        desc->owner = owner;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;
        char *p = "";
        int cpu;

        for_each_possible_cpu(cpu) {
                unsigned int c = irq_desc_kstat_cpu(desc, cpu);

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
                p = ",";
        }

        ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
        return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.chip && desc->irq_data.chip->name) {
                ret = scnprintf(buf, PAGE_SIZE, "%s\n",
                                desc->irq_data.chip->name);
        }
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.domain)
                ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        ret = sprintf(buf, "%s\n",
                      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(type);

static ssize_t wakeup_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        ret = sprintf(buf, "%s\n",
                      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(wakeup);

static ssize_t name_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->name)
                ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        struct irqaction *action;
        ssize_t ret = 0;
        char *p = "";

        raw_spin_lock_irq(&desc->lock);
        for_each_action_of_desc(desc, action) {
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
                                 p, action->name);
                p = ",";
        }
        raw_spin_unlock_irq(&desc->lock);

        if (ret)
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

        return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
        &per_cpu_count_attr.attr,
        &chip_name_attr.attr,
        &hwirq_attr.attr,
        &type_attr.attr,
        &wakeup_attr.attr,
        &name_attr.attr,
        &actions_attr.attr,
        NULL
};
ATTRIBUTE_GROUPS(irq);

static struct kobj_type irq_kobj_type = {
        .release        = irq_kobj_release,
        .sysfs_ops      = &kobj_sysfs_ops,
        .default_groups = irq_groups,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
        if (irq_kobj_base) {
                /*
                 * Continue even in case of failure as this is nothing
                 * crucial and failures in the late irq_sysfs_init()
                 * cannot be rolled back.
                 */
                if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
                        pr_warn("Failed to add kobject for irq %d\n", irq);
                else
                        desc->istate |= IRQS_SYSFS;
        }
}

static void irq_sysfs_del(struct irq_desc *desc)
{
        /*
         * Only invoke kobject_del() when kobject_add() was successfully
         * invoked for the descriptor. This covers both early boot, where
         * sysfs is not initialized yet, and the case of a failed
         * kobject_add() invocation.
         */
        if (desc->istate & IRQS_SYSFS)
                kobject_del(&desc->kobj);
}

static int __init irq_sysfs_init(void)
{
        struct irq_desc *desc;
        int irq;

        /* Prevent concurrent irq alloc/free */
        irq_lock_sparse();

        irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
        if (!irq_kobj_base) {
                irq_unlock_sparse();
                return -ENOMEM;
        }

        /* Add the already allocated interrupts */
        for_each_irq_desc(irq, desc)
                irq_sysfs_add(irq, desc);
        irq_unlock_sparse();

        return 0;
}
postcore_initcall(irq_sysfs_init);

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
        .release = irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
static void irq_sysfs_del(struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}
#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
EXPORT_SYMBOL_GPL(irq_to_desc);
#endif

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
        mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
        mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
                                   const struct cpumask *affinity,
                                   struct module *owner)
{
        struct irq_desc *desc;

        desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        mutex_init(&desc->request_mutex);
        init_rcu_head(&desc->rcu);
        init_waitqueue_head(&desc->wait_for_threads);

        desc_set_defaults(irq, desc, node, affinity, owner);
        irqd_set(&desc->irq_data, flags);
        kobject_init(&desc->kobj, &irq_kobj_type);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
        struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

        kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        irq_remove_debugfs_entry(desc);
        unregister_irq_proc(irq, desc);

        /*
         * sparse_irq_lock protects also show_interrupts() and
         * kstat_irqs_usr(). Once we deleted the descriptor from the
         * sparse tree we can free it. Access in proc will fail to
         * lookup the descriptor.
         *
         * The sysfs entry must be serialized against a concurrent
         * irq_sysfs_init() as well.
         */
        irq_sysfs_del(desc);
        delete_irq_desc(irq);

        /*
         * We free the descriptor, masks and stat fields via RCU. That
         * allows demultiplex interrupts to do rcu based management of
         * the child interrupts.
         * This also allows us to use rcu in kstat_irqs_usr().
         */
        call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
                       const struct irq_affinity_desc *affinity,
                       struct module *owner)
{
        struct irq_desc *desc;
        int i;

        /* Validate affinity mask(s) */
        if (affinity) {
                for (i = 0; i < cnt; i++) {
                        if (cpumask_empty(&affinity[i].mask))
                                return -EINVAL;
                }
        }

        for (i = 0; i < cnt; i++) {
                const struct cpumask *mask = NULL;
                unsigned int flags = 0;

                if (affinity) {
                        if (affinity->is_managed) {
                                flags = IRQD_AFFINITY_MANAGED |
                                        IRQD_MANAGED_SHUTDOWN;
                        }
                        mask = &affinity->mask;
                        node = cpu_to_node(cpumask_first(mask));
                        affinity++;
                }

                desc = alloc_desc(start + i, node, flags, mask, owner);
                if (!desc)
                        goto err;
                irq_insert_desc(start + i, desc);
                irq_sysfs_add(start + i, desc);
                irq_add_debugfs_entry(start + i, desc);
        }
        bitmap_set(allocated_irqs, start, cnt);
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);
        return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        if (nr > IRQ_BITMAP_BITS)
                return -ENOMEM;
        nr_irqs = nr;
        return 0;
}

int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
               NR_IRQS, nr_irqs, initcnt);

        if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
                nr_irqs = IRQ_BITMAP_BITS;

        if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
                initcnt = IRQ_BITMAP_BITS;

        if (initcnt > nr_irqs)
                nr_irqs = initcnt;

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node, 0, NULL, NULL);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                alloc_masks(&desc[i], node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                mutex_init(&desc[i].request_mutex);
                init_waitqueue_head(&desc[i].wait_for_threads);
                desc_set_defaults(i, &desc[i], node, NULL, NULL);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
                              const struct irq_affinity_desc *affinity,
                              struct module *owner)
{
        u32 i;

        for (i = 0; i < cnt; i++) {
                struct irq_desc *desc = irq_to_desc(start + i);

                desc->owner = owner;
        }
        bitmap_set(allocated_irqs, start, cnt);
        return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
        mutex_lock(&sparse_irq_lock);
        bitmap_set(allocated_irqs, irq, 1);
        mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
        free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

int handle_irq_desc(struct irq_desc *desc)
{
        struct irq_data *data;

        if (!desc)
                return -EINVAL;

        data = irq_desc_get_irq_data(desc);
        if (WARN_ON_ONCE(!in_hardirq() && handle_enforce_irqctx(data)))
                return -EPERM;

        generic_handle_irq_desc(desc);
        return 0;
}

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:        The irq number to handle
 *
 * Returns:     0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an IRQ context with irq regs
 * initialized.
 */
int generic_handle_irq(unsigned int irq)
{
        return handle_irq_desc(irq_to_desc(irq));
}
EXPORT_SYMBOL_GPL(generic_handle_irq);

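/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * generic_handle_irq() is a demultiplexing flow handler for a cascaded
 * controller, running in hard interrupt context. All foo_* names below are
 * hypothetical.
 *
 *	static void foo_demux_handler(struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		struct foo_priv *priv = irq_desc_get_handler_data(desc);
 *		unsigned long pending = foo_read_pending(priv);
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		for_each_set_bit(bit, &pending, FOO_NR_IRQS)
 *			generic_handle_irq(priv->irq_base + bit);
 *		chained_irq_exit(chip, desc);
 *	}
 */
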
/**
 * generic_handle_irq_safe - Invoke the handler for a particular irq from any
 *                           context.
 * @irq:        The irq number to handle
 *
 * Returns:     0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process context). It
 * will report an error if not invoked from IRQ context and the irq has been
 * marked to enforce IRQ-context only.
 */
int generic_handle_irq_safe(unsigned int irq)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = handle_irq_desc(irq_to_desc(irq));
        local_irq_restore(flags);
        return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_irq_safe);

#ifdef CONFIG_IRQ_DOMAIN
/**
 * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
 *                             to a domain.
 * @domain:     The domain where to perform the lookup
 * @hwirq:      The HW irq number to convert to a logical one
 *
 * Returns:     0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an IRQ context with irq regs
 * initialized.
 */
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
{
        return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq);

/**
 * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging
 *                                  to a domain from any context.
 * @domain:     The domain where to perform the lookup
 * @hwirq:      The HW irq number to convert to a logical one
 *
 * Returns:     0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process
 * context). If the interrupt is marked as 'enforce IRQ-context only' then
 * the function must be invoked from hard interrupt context.
 */
int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq));
        local_irq_restore(flags);
        return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe);

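/*
 * Illustrative sketch (not part of the original file): the *_safe variant is
 * meant for demultiplexing from non-hardirq context, e.g. the threaded
 * handler of an I2C interrupt-controller driver. All foo_* names are
 * hypothetical.
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *data)
 *	{
 *		struct foo_chip *chip = data;
 *		unsigned long status = foo_i2c_read_status(chip);
 *		int bit;
 *
 *		for_each_set_bit(bit, &status, FOO_NR_LINES)
 *			generic_handle_domain_irq_safe(chip->domain, bit);
 *		return IRQ_HANDLED;
 *	}
 */
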
/**
 * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
 *                             to a domain.
 * @domain:     The domain where to perform the lookup
 * @hwirq:      The HW irq number to convert to a logical one
 *
 * Returns:     0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an NMI context with irq regs
 * initialized.
 **/
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq)
{
        WARN_ON_ONCE(!in_nmi());
        return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
#endif

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:       Start of descriptor range
 * @cnt:        Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        mutex_lock(&sparse_irq_lock);
        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        bitmap_clear(allocated_irqs, from, cnt);
        mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:        Allocate for specific irq number if irq >= 0
 * @from:       Start the search from this irq number
 * @cnt:        Number of consecutive irqs to allocate.
 * @node:       Preferred node on which the irq descriptor should be allocated
 * @owner:      Owning module (can be NULL)
 * @affinity:   Optional pointer to an affinity mask array of size @cnt which
 *              hints where the irq descriptors should be allocated and which
 *              default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                  struct module *owner, const struct irq_affinity_desc *affinity)
{
        int start, ret;

        if (!cnt)
                return -EINVAL;

        if (irq >= 0) {
                if (from > irq)
                        return -EINVAL;
                from = irq;
        } else {
                /*
                 * For interrupts which are freely allocated the
                 * architecture can force a lower bound to the @from
                 * argument. x86 uses this to exclude the GSI space.
                 */
                from = arch_dynirq_lower_bound(from);
        }

        mutex_lock(&sparse_irq_lock);

        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
                                           from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto unlock;

        if (start + cnt > nr_irqs) {
                ret = irq_expand_nr_irqs(start + cnt);
                if (ret)
                        goto unlock;
        }
        ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);

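/*
 * Illustrative sketch (not part of the original file): callers normally use
 * the wrapper macros from <linux/irq.h> rather than calling
 * __irq_alloc_descs() directly. A driver needing a block of eight
 * dynamically numbered interrupts might do something like:
 *
 *	int virq = irq_alloc_descs(-1, 0, 8, numa_node_id());
 *
 *	if (virq < 0)
 *		return virq;		-- allocation failed
 *	...
 *	irq_free_descs(virq, 8);	-- release the range again
 */
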
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:     where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
                    unsigned int check)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                if (check & _IRQ_DESC_CHECK) {
                        if ((check & _IRQ_DESC_PERCPU) &&
                            !irq_settings_is_per_cpu_devid(desc))
                                return NULL;

                        if (!(check & _IRQ_DESC_PERCPU) &&
                            irq_settings_is_per_cpu_devid(desc))
                                return NULL;
                }

                if (bus)
                        chip_bus_lock(desc);
                raw_spin_lock_irqsave(&desc->lock, *flags);
        }
        return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
        __releases(&desc->lock)
{
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (bus)
                chip_bus_sync_unlock(desc);
}

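/*
 * Illustrative sketch (not part of the original file): within kernel/irq
 * these two helpers are typically paired through the irq_get_desc_lock() /
 * irq_put_desc_unlock() style wrappers in internals.h, roughly like:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	-- ... operate on the descriptor under desc->lock ...
 *	irq_put_desc_unlock(desc, flags);
 */
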
int irq_set_percpu_devid_partition(unsigned int irq,
                                   const struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;

        if (desc->percpu_enabled)
                return -EINVAL;

        desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

        if (!desc->percpu_enabled)
                return -ENOMEM;

        if (affinity)
                desc->percpu_affinity = affinity;
        else
                desc->percpu_affinity = cpu_possible_mask;

        irq_set_percpu_devid_flags(irq);
        return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
        return irq_set_percpu_devid_partition(irq, NULL);
}

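/*
 * Illustrative sketch (not part of the original file): per-CPU interrupts
 * such as architected timers are typically set up by marking the irq as
 * percpu-devid and then requesting it with a per-CPU device pointer:
 *
 *	err = irq_set_percpu_devid(virq);
 *	if (!err)
 *		err = request_percpu_irq(virq, foo_timer_handler,
 *					 "foo-timer", foo_percpu_dev);
 *
 * where foo_* are hypothetical and foo_percpu_dev is a __percpu pointer.
 */
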
int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !desc->percpu_enabled)
                return -EINVAL;

        if (affinity)
                cpumask_copy(affinity, desc->percpu_affinity);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);

void kstat_incr_irq_this_cpu(unsigned int irq)
{
        kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:        The interrupt number
 * @cpu:        The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->kstat_irqs ?
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

static bool irq_is_nmi(struct irq_desc *desc)
{
        return desc->istate & IRQS_NMI;
}

static unsigned int kstat_irqs(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned int sum = 0;
        int cpu;

        if (!desc || !desc->kstat_irqs)
                return 0;
        if (!irq_settings_is_per_cpu_devid(desc) &&
            !irq_settings_is_per_cpu(desc) &&
            !irq_is_nmi(desc))
                return data_race(desc->tot_count);

        for_each_possible_cpu(cpu)
                sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
        return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt from thread context
 * @irq:        The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 *
 * It uses rcu to protect the access since a concurrent removal of an
 * interrupt descriptor is observing an rcu grace period before
 * delayed_free_desc()/irq_kobj_release().
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
        unsigned int sum;

        rcu_read_lock();
        sum = kstat_irqs(irq);
        rcu_read_unlock();
        return sum;
}

#ifdef CONFIG_LOCKDEP
void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
                             struct lock_class_key *request_class)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                lockdep_set_class(&desc->lock, lock_class);
                lockdep_set_class(&desc->request_mutex, request_class);
        }
}
EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
#endif