/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 *	Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

struct irq_domain *x86_vector_domain;
static DEFINE_RAW_SPINLOCK(vector_lock);
static struct irq_chip lapic_controller;

void lock_vector_lock(void)
{
	/*
	 * Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	if (!irq_data)
		return NULL;

	while (irq_data->parent_data)
		irq_data = irq_data->parent_data;

	return irq_data->chip_data;
}
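
/*
 * Note: with hierarchical irqdomains the x86 vector domain is the root
 * of every interrupt's domain stack, so the parent_data walk above
 * always ends at the root irq_data, whose chip_data is the struct
 * irq_cfg installed by x86_vector_alloc_irqs() below.
 */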

static struct irq_cfg *alloc_irq_cfg(int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
#ifdef CONFIG_X86_IO_APIC
	INIT_LIST_HEAD(&cfg->irq_2_pin);
#endif
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_cfg(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

static void free_irq_cfg(struct irq_cfg *cfg)
{
	if (cfg) {
		free_cpumask_var(cfg->domain);
		free_cpumask_var(cfg->old_domain);
		kfree(cfg);
	}
}

static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
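	/*
	 * Concretely: the search below advances candidate vectors in
	 * steps of 16, so if one allocation picks vector V the next
	 * candidate is V + 0x10, which lives in the next priority level
	 * (level = vector >> 4). The offset only advances once the scan
	 * has wrapped through all levels.
	 */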
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	/* Only try to allocate irqs on cpus that are present */
	err = -ENOSPC;
	cpumask_clear(cfg->old_domain);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask, mask);

		if (cpumask_subset(tmp_mask, cfg->domain)) {
			err = 0;
			if (cpumask_equal(tmp_mask, cfg->domain))
				break;
			/*
			 * The new cpumask using the vector is a proper
			 * subset of the mask currently in use, so clean up
			 * the vector allocation for the members that are
			 * not used anymore.
			 */
			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
			break;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		if (unlikely(current_vector == vector)) {
			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
			continue;
		}

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
			if (per_cpu(vector_irq, new_cpu)[vector] >
			    VECTOR_UNDEFINED)
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (cfg->vector) {
			cpumask_copy(cfg->old_domain, cfg->domain);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);

	if (!err) {
		/* Cache destination APIC IDs into cfg->dest_apicid */
		err = apic->cpu_mask_to_apicid_and(mask, cfg->domain,
						   &cfg->dest_apicid);
	}

	return err;
}

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

void clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress)) {
		raw_spin_unlock_irqrestore(&vector_lock, flags);
		return;
	}
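
	/*
	 * A vector move was still in flight: cpus in old_domain may still
	 * carry the stale vector -> irq translation, so scrub those
	 * per-cpu entries as well before dropping the lock.
	 */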
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
			break;
		}
	}
	cfg->move_in_progress = 0;
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

static inline const struct cpumask *
irq_alloc_info_get_mask(struct irq_alloc_info *info)
{
	return (!info || !info->mask) ? apic->target_cpus() : info->mask;
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			free_remapped_irq(virq + i);
			clear_irq_vector(virq + i, irq_data->chip_data);
			free_irq_cfg(irq_data->chip_data);
			irq_domain_reset_irq_data(irq_data);
		}
	}
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	const struct cpumask *mask;
	struct irq_data *irq_data;
	struct irq_cfg *cfg;
	int i, err;

	if (disable_apic)
		return -ENXIO;

	/* Currently the vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	mask = irq_alloc_info_get_mask(info);
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irq_data);
		cfg = alloc_irq_cfg(irq_data->node);
		if (!cfg) {
			err = -ENOMEM;
			goto error;
		}

		irq_data->chip = &lapic_controller;
		irq_data->chip_data = cfg;
		irq_data->hwirq = virq + i;
		err = assign_irq_vector(virq + i, cfg, mask);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}

static struct irq_domain_ops x86_vector_domain_ops = {
	.alloc = x86_vector_alloc_irqs,
	.free = x86_vector_free_irqs,
};
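
/*
 * These callbacks are invoked through the generic hierarchical
 * irqdomain code: child domains (IOAPIC, MSI, HT) forward their
 * allocation requests to this root domain, which reserves a CPU
 * vector via .alloc and releases it again via .free.
 */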

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/* Leave room for MSI and HT dynamic irqs */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	return nr_legacy_irqs();
}
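
/*
 * Worked example with illustrative numbers: on a machine with 8
 * possible cpus, 16 legacy irqs and a single IOAPIC with gsi_top = 24,
 * the estimate above is (24 + 16) + 8 * 8 = 104, plus 24 * 16 = 384
 * for MSI/HT dynamic irqs, so nr_irqs would be lowered to 488 if it
 * was larger.
 */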

int __init arch_early_irq_init(void)
{
	x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
						NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);

	return arch_early_ioapic_init();
}

static void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we set up our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the in-use vectors */
	for_each_active_irq(irq) {
		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq <= VECTOR_UNDEFINED)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
	}
	raw_spin_unlock(&vector_lock);
}

/*
 * Set up the vector to irq mappings.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	/*
	 * On most platforms the legacy PIC delivers interrupts to the
	 * boot cpu only, but on some platforms PIC interrupts are
	 * delivered to multiple cpus. If the legacy IRQ is handled by
	 * the legacy PIC, set up the static legacy vector to irq
	 * mapping for the new cpu that is coming online:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;

	__setup_vector_irq(cpu);
}

int apic_retrigger_irq(struct irq_data *data)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}

/*
 * Either sets data->affinity to a valid value and returns the
 * ->cpu_mask_to_apicid of that in dest_id, or returns an error and
 * leaves data->affinity untouched.
 */
int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      unsigned int *dest_id)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	unsigned int irq = data->irq;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	cpumask_copy(data->affinity, mask);

	return 0;
}

static int vector_set_affinity(struct irq_data *irq_data,
			       const struct cpumask *dest, bool force)
{
	struct irq_cfg *cfg = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, cfg, dest);
	if (err) {
		struct irq_data *top = irq_get_irq_data(irq);

		if (assign_irq_vector(irq, cfg, top->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	return IRQ_SET_MASK_OK;
}

static struct irq_chip lapic_controller = {
	.irq_ack = apic_ack_edge,
	.irq_set_affinity = vector_set_affinity,
	.irq_retrigger = apic_retrigger_irq,
};
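
/*
 * lapic_controller is the irq_chip installed on the root (vector)
 * level irq_data by x86_vector_alloc_irqs() above; child domains in
 * the hierarchy stack their own chips on top of it.
 */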

#ifdef CONFIG_SMP
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;

		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i),
					    IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}

asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	irq_enter();
	exit_idle();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;

		irq = __this_cpu_read(vector_irq[vector]);

		if (irq <= VECTOR_UNDEFINED)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * still registered in the cpu's IRR. If so, this is not
		 * the best time to clean it up, so clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;

	if (likely(!cfg->move_in_progress))
		return;

	me = smp_processor_id();

	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
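
/*
 * Note on ~orig_ax: the x86 interrupt entry code stores the ones'
 * complement of the vector number in pt_regs->orig_ax (cf. do_IRQ()),
 * so complementing it again recovers the vector that is currently
 * being serviced.
 */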

void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (!cfg)
		return;

	__irq_complete_move(cfg, cfg->vector);
}
#endif

/*
 * Dynamic irq allocation and deallocation. Should be replaced by irq domains!
 */
int arch_setup_hwirq(unsigned int irq, int node)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	int ret;

	cfg = alloc_irq_cfg(node);
	if (!cfg)
		return -ENOMEM;

	raw_spin_lock_irqsave(&vector_lock, flags);
	ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	if (!ret)
		irq_set_chip_data(irq, cfg);
	else
		free_irq_cfg(cfg);
	return ret;
}

void arch_teardown_hwirq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	free_remapped_irq(irq);
	clear_irq_vector(irq, cfg);
	irq_set_chip_data(irq, NULL);
	free_irq_cfg(cfg);
}

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	/* OCW3: select the In-Service Register for the next reads */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	/* OCW3: switch back to reading the Interrupt Request Register */
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* Don't print out if the apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);