/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

struct irq_domain *x86_vector_domain;
static DEFINE_RAW_SPINLOCK(vector_lock);
static struct irq_chip lapic_controller;

void lock_vector_lock(void)
{
        /*
         * Used to ensure that the online set of cpus does not change
         * during assign_irq_vector().
         */
        raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
        raw_spin_unlock(&vector_lock);
}

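/*
 * With hierarchical irqdomains the x86 vector domain is the root of the
 * hierarchy, so the irq_cfg lives in the chip_data of the root irq_data.
 * irqd_cfg() walks the parent chain to reach it.
 */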
struct irq_cfg *irq_cfg(unsigned int irq)
{
        return irqd_cfg(irq_get_irq_data(irq));
}

struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
        if (!irq_data)
                return NULL;

        while (irq_data->parent_data)
                irq_data = irq_data->parent_data;

        return irq_data->chip_data;
}

static struct irq_cfg *alloc_irq_cfg(int node)
{
        struct irq_cfg *cfg;

        cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
        if (!cfg)
                return NULL;
        if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
                goto out_cfg;
        if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
                goto out_domain;
#ifdef CONFIG_X86_IO_APIC
        INIT_LIST_HEAD(&cfg->irq_2_pin);
#endif
        return cfg;
out_domain:
        free_cpumask_var(cfg->domain);
out_cfg:
        kfree(cfg);
        return NULL;
}

struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
        int res = irq_alloc_desc_at(at, node);
        struct irq_cfg *cfg;

        if (res < 0) {
                if (res != -EEXIST)
                        return NULL;
                cfg = irq_cfg(at);
                if (cfg)
                        return cfg;
        }

        cfg = alloc_irq_cfg(node);
        if (cfg)
                irq_set_chip_data(at, cfg);
        else
                irq_free_desc(at);
        return cfg;
}

static void free_irq_cfg(struct irq_cfg *cfg)
{
        if (cfg) {
                free_cpumask_var(cfg->domain);
                free_cpumask_var(cfg->old_domain);
                kfree(cfg);
        }
}

static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
        /*
         * NOTE! The local APIC isn't very good at handling
         * multiple interrupts at the same interrupt level.
         * As the interrupt level is determined by taking the
         * vector number and shifting that right by 4, we
         * want to spread these out a bit so that they don't
         * all fall in the same interrupt level.
         *
         * Also, we've got to be careful not to trash gate
         * 0x80, because int 0x80 is hm, kind of importantish. ;)
         */
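        /*
         * Illustrative example (not from the original comment): vectors
         * 0x31, 0x41 and 0x51 all have offset 1 within their group of 16
         * but land in priority levels 3, 4 and 5.  That is why the search
         * below advances by 16 and only bumps the offset once a full pass
         * wraps past first_system_vector.
         */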
        static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
        static int current_offset = VECTOR_OFFSET_START % 16;
        int cpu, err;
        cpumask_var_t tmp_mask;

        if (cfg->move_in_progress)
                return -EBUSY;

        if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
                return -ENOMEM;

        /* Only try to allocate irqs on cpus that are online */
        err = -ENOSPC;
        cpumask_clear(cfg->old_domain);
        cpu = cpumask_first_and(mask, cpu_online_mask);
        while (cpu < nr_cpu_ids) {
                int new_cpu, vector, offset;

                apic->vector_allocation_domain(cpu, tmp_mask, mask);

                if (cpumask_subset(tmp_mask, cfg->domain)) {
                        err = 0;
                        if (cpumask_equal(tmp_mask, cfg->domain))
                                break;
                        /*
                         * The new cpumask using the vector is a proper
                         * subset of the mask currently in use, so clean up
                         * the vector allocation for the members that are
                         * not used anymore.
                         */
                        cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
                        cfg->move_in_progress =
                           cpumask_intersects(cfg->old_domain, cpu_online_mask);
                        cpumask_and(cfg->domain, cfg->domain, tmp_mask);
                        break;
                }

                vector = current_vector;
                offset = current_offset;
next:
                vector += 16;
                if (vector >= first_system_vector) {
                        offset = (offset + 1) % 16;
                        vector = FIRST_EXTERNAL_VECTOR + offset;
                }

                if (unlikely(current_vector == vector)) {
                        cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
                        cpumask_andnot(tmp_mask, mask, cfg->old_domain);
                        cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
                        continue;
                }

                if (test_bit(vector, used_vectors))
                        goto next;

                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
                        if (per_cpu(vector_irq, new_cpu)[vector] >
                            VECTOR_UNDEFINED)
                                goto next;
                }
                /* Found one! */
                current_vector = vector;
                current_offset = offset;
                if (cfg->vector) {
                        cpumask_copy(cfg->old_domain, cfg->domain);
                        cfg->move_in_progress =
                           cpumask_intersects(cfg->old_domain, cpu_online_mask);
                }
                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
                cfg->vector = vector;
                cpumask_copy(cfg->domain, tmp_mask);
                err = 0;
                break;
        }
        free_cpumask_var(tmp_mask);

        if (!err) {
                /* Cache the destination APIC ID in cfg->dest_apicid */
                err = apic->cpu_mask_to_apicid_and(mask, cfg->domain,
                                                   &cfg->dest_apicid);
        }

        return err;
}

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
        int err;
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        err = __assign_irq_vector(irq, cfg, mask);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return err;
}

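/*
 * Release the vector and the per-cpu vector_irq entries for @irq.  When a
 * vector move was still in flight, cpus in old_domain may also hold stale
 * vector_irq entries for this irq; those are scrubbed as well.
 */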
void clear_irq_vector(int irq, struct irq_cfg *cfg)
{
        int cpu, vector;
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        BUG_ON(!cfg->vector);

        vector = cfg->vector;
        for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
                per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;

        cfg->vector = 0;
        cpumask_clear(cfg->domain);

        if (likely(!cfg->move_in_progress)) {
                raw_spin_unlock_irqrestore(&vector_lock, flags);
                return;
        }

        for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                     vector++) {
                        if (per_cpu(vector_irq, cpu)[vector] != irq)
                                continue;
                        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
                        break;
                }
        }
        cfg->move_in_progress = 0;
        raw_spin_unlock_irqrestore(&vector_lock, flags);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
                         const struct cpumask *mask)
{
        memset(info, 0, sizeof(*info));
        info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
        if (src)
                *dst = *src;
        else
                memset(dst, 0, sizeof(*dst));
}

static inline const struct cpumask *
irq_alloc_info_get_mask(struct irq_alloc_info *info)
{
        return (!info || !info->mask) ? apic->target_cpus() : info->mask;
}

static void x86_vector_free_irqs(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *irq_data;
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irq_data && irq_data->chip_data) {
                        free_remapped_irq(virq + i);
                        clear_irq_vector(virq + i, irq_data->chip_data);
                        free_irq_cfg(irq_data->chip_data);
                        irq_domain_reset_irq_data(irq_data);
                }
        }
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *arg)
{
        struct irq_alloc_info *info = arg;
        const struct cpumask *mask;
        struct irq_data *irq_data;
        struct irq_cfg *cfg;
        int i, err;

        if (disable_apic)
                return -ENXIO;

        /* Currently the vector allocator can't guarantee contiguous allocations */
        if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
                return -ENOSYS;

        mask = irq_alloc_info_get_mask(info);
        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(domain, virq + i);
                BUG_ON(!irq_data);
                cfg = alloc_irq_cfg(irq_data->node);
                if (!cfg) {
                        err = -ENOMEM;
                        goto error;
                }

                irq_data->chip = &lapic_controller;
                irq_data->chip_data = cfg;
                irq_data->hwirq = virq + i;
                err = assign_irq_vector(virq + i, cfg, mask);
                if (err)
                        goto error;
        }

        return 0;

error:
        x86_vector_free_irqs(domain, virq, i + 1);
        return err;
}

static struct irq_domain_ops x86_vector_domain_ops = {
        .alloc = x86_vector_alloc_irqs,
        .free = x86_vector_free_irqs,
};

int __init arch_probe_nr_irqs(void)
{
        int nr;

        if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
                nr_irqs = NR_VECTORS * nr_cpu_ids;

        nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
        /*
         * Extra room for dynamic MSI and HT irqs
         */
        if (gsi_top <= NR_IRQS_LEGACY)
                nr += 8 * nr_cpu_ids;
        else
                nr += gsi_top * 16;
#endif
        if (nr < nr_irqs)
                nr_irqs = nr;

        return nr_legacy_irqs();
}

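/*
 * Create the root x86 vector irqdomain; interrupt controllers such as the
 * IOAPIC and MSI attach below it in the irqdomain hierarchy.
 */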
int __init arch_early_irq_init(void)
{
        x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
                                                NULL);
        BUG_ON(x86_vector_domain == NULL);
        irq_set_default_host(x86_vector_domain);

        return arch_early_ioapic_init();
}

static void __setup_vector_irq(int cpu)
{
        /* Initialize vector_irq on a new cpu */
        int irq, vector;
        struct irq_cfg *cfg;

        /*
         * vector_lock will make sure that we don't run into irq vector
         * assignments that might be happening on another cpu in parallel,
         * while we setup our initial vector to irq mappings.
         */
        raw_spin_lock(&vector_lock);
        /* Mark the in-use vectors */
        for_each_active_irq(irq) {
                cfg = irq_cfg(irq);
                if (!cfg)
                        continue;

                if (!cpumask_test_cpu(cpu, cfg->domain))
                        continue;
                vector = cfg->vector;
                per_cpu(vector_irq, cpu)[vector] = irq;
        }
        /* Mark the free vectors */
        for (vector = 0; vector < NR_VECTORS; ++vector) {
                irq = per_cpu(vector_irq, cpu)[vector];
                if (irq <= VECTOR_UNDEFINED)
                        continue;

                cfg = irq_cfg(irq);
                if (!cpumask_test_cpu(cpu, cfg->domain))
                        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
        }
        raw_spin_unlock(&vector_lock);
}

/*
 * Setup the vector to irq mappings.
 */
void setup_vector_irq(int cpu)
{
        int irq;

        /*
         * On most platforms the legacy PIC delivers its interrupts to the
         * boot cpu, but on certain platforms PIC interrupts are delivered
         * to multiple cpus.  If the legacy IRQ is handled by the legacy
         * PIC, set up the static legacy vector to irq mapping for the new
         * cpu that is coming online:
         */
        for (irq = 0; irq < nr_legacy_irqs(); irq++)
                per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;

        __setup_vector_irq(cpu);
}

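/* Retrigger an irq in software by sending an IPI to one cpu in its domain. */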
int apic_retrigger_irq(struct irq_data *data)
{
        struct irq_cfg *cfg = irqd_cfg(data);
        unsigned long flags;
        int cpu;

        raw_spin_lock_irqsave(&vector_lock, flags);
        cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
        apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        return 1;
}

void apic_ack_edge(struct irq_data *data)
{
        irq_complete_move(irqd_cfg(data));
        irq_move_irq(data);
        ack_APIC_irq();
}

/*
 * Either sets data->affinity to a valid value and returns the
 * ->cpu_mask_to_apicid of that in dest_id, or returns an error and
 * leaves data->affinity untouched.
 */
int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
                      unsigned int *dest_id)
{
        struct irq_cfg *cfg = irqd_cfg(data);
        unsigned int irq = data->irq;
        int err;

        if (!config_enabled(CONFIG_SMP))
                return -EPERM;

        if (!cpumask_intersects(mask, cpu_online_mask))
                return -EINVAL;

        err = assign_irq_vector(irq, cfg, mask);
        if (err)
                return err;

        err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
        if (err) {
                if (assign_irq_vector(irq, cfg, data->affinity))
                        pr_err("Failed to recover vector for irq %d\n", irq);
                return err;
        }

        cpumask_copy(data->affinity, mask);

        return 0;
}

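/*
 * irq_set_affinity callback of the vector domain's irq_chip.  Unlike
 * apic_set_affinity() above, no destination id is handed back to the
 * caller; __assign_irq_vector() already caches it in cfg->dest_apicid.
 */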
static int vector_set_affinity(struct irq_data *irq_data,
                               const struct cpumask *dest, bool force)
{
        struct irq_cfg *cfg = irq_data->chip_data;
        int err, irq = irq_data->irq;

        if (!config_enabled(CONFIG_SMP))
                return -EPERM;

        if (!cpumask_intersects(dest, cpu_online_mask))
                return -EINVAL;

        err = assign_irq_vector(irq, cfg, dest);
        if (err) {
                struct irq_data *top = irq_get_irq_data(irq);

                if (assign_irq_vector(irq, cfg, top->affinity))
                        pr_err("Failed to recover vector for irq %d\n", irq);
                return err;
        }

        return IRQ_SET_MASK_OK;
}

static struct irq_chip lapic_controller = {
        .irq_ack = apic_ack_edge,
        .irq_set_affinity = vector_set_affinity,
        .irq_retrigger = apic_retrigger_irq,
};

#ifdef CONFIG_SMP
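/*
 * Vector migration is a two-step affair: assign_irq_vector() installs the
 * new vector and domain while recording the previous cpus in old_domain,
 * and the IRQ_MOVE_CLEANUP_VECTOR IPI sent below releases the old per-cpu
 * vector_irq entries once the old vector can no longer be in service.
 */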
void send_cleanup_vector(struct irq_cfg *cfg)
{
        cpumask_var_t cleanup_mask;

        if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
                unsigned int i;

                for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
                        apic->send_IPI_mask(cpumask_of(i),
                                            IRQ_MOVE_CLEANUP_VECTOR);
        } else {
                cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
                apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
                free_cpumask_var(cleanup_mask);
        }
        cfg->move_in_progress = 0;
}

asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
        unsigned vector, me;

        ack_APIC_irq();
        irq_enter();
        exit_idle();

        me = smp_processor_id();
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                int irq;
                unsigned int irr;
                struct irq_desc *desc;
                struct irq_cfg *cfg;

                irq = __this_cpu_read(vector_irq[vector]);

                if (irq <= VECTOR_UNDEFINED)
                        continue;

                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                cfg = irq_cfg(irq);
                if (!cfg)
                        continue;

                raw_spin_lock(&desc->lock);

                /*
                 * Check if the irq migration is in progress. If so, we
                 * haven't received the cleanup request yet for this irq.
                 */
                if (cfg->move_in_progress)
                        goto unlock;

                if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
                        goto unlock;

                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                /*
                 * Check if the vector that needs to be cleaned up is
                 * still registered in the cpu's IRR. If so, this is not
                 * the best time to clean it up. Let's clean it up in the
                 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
                 * to myself.
                 */
                if (irr & (1 << (vector % 32))) {
                        apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
                        goto unlock;
                }
                __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
unlock:
                raw_spin_unlock(&desc->lock);
        }

        irq_exit();
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
        unsigned me;

        if (likely(!cfg->move_in_progress))
                return;

        me = smp_processor_id();

        if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
                send_cleanup_vector(cfg);
}

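/*
 * The x86 interrupt entry code stores the ones' complement of the vector
 * number in orig_ax, so ~orig_ax recovers the vector currently being
 * serviced.
 */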
void irq_complete_move(struct irq_cfg *cfg)
{
        __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

void irq_force_complete_move(int irq)
{
        struct irq_cfg *cfg = irq_cfg(irq);

        if (!cfg)
                return;

        __irq_complete_move(cfg, cfg->vector);
}
#endif

/*
 * Dynamic irq allocation and deallocation. Should be replaced by irq domains!
 */
int arch_setup_hwirq(unsigned int irq, int node)
{
        struct irq_cfg *cfg;
        unsigned long flags;
        int ret;

        cfg = alloc_irq_cfg(node);
        if (!cfg)
                return -ENOMEM;

        raw_spin_lock_irqsave(&vector_lock, flags);
        ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        if (!ret)
                irq_set_chip_data(irq, cfg);
        else
                free_irq_cfg(cfg);
        return ret;
}

void arch_teardown_hwirq(unsigned int irq)
{
        struct irq_cfg *cfg = irq_cfg(irq);

        free_remapped_irq(irq);
        clear_irq_vector(irq, cfg);
        irq_set_chip_data(irq, NULL);
        free_irq_cfg(cfg);
}

static void __init print_APIC_field(int base)
{
        int i;

        printk(KERN_DEBUG);

        for (i = 0; i < 8; i++)
                pr_cont("%08x", apic_read(base + i*0x10));

        pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
        unsigned int i, v, ver, maxlvt;
        u64 icr;

        pr_debug("printing local APIC contents on CPU#%d/%d:\n",
                 smp_processor_id(), hard_smp_processor_id());
        v = apic_read(APIC_ID);
        pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
        v = apic_read(APIC_LVR);
        pr_info("... APIC VERSION: %08x\n", v);
        ver = GET_APIC_VERSION(v);
        maxlvt = lapic_get_maxlvt();

        v = apic_read(APIC_TASKPRI);
        pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                if (!APIC_XAPIC(ver)) {
                        v = apic_read(APIC_ARBPRI);
                        pr_debug("... APIC ARBPRI: %08x (%02x)\n",
                                 v, v & APIC_ARBPRI_MASK);
                }
                v = apic_read(APIC_PROCPRI);
                pr_debug("... APIC PROCPRI: %08x\n", v);
        }

        /*
         * Remote read supported only in the 82489DX and local APIC for
         * Pentium processors.
         */
        if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
                v = apic_read(APIC_RRR);
                pr_debug("... APIC RRR: %08x\n", v);
        }

        v = apic_read(APIC_LDR);
        pr_debug("... APIC LDR: %08x\n", v);
        if (!x2apic_enabled()) {
                v = apic_read(APIC_DFR);
                pr_debug("... APIC DFR: %08x\n", v);
        }
        v = apic_read(APIC_SPIV);
        pr_debug("... APIC SPIV: %08x\n", v);

        pr_debug("... APIC ISR field:\n");
        print_APIC_field(APIC_ISR);
        pr_debug("... APIC TMR field:\n");
        print_APIC_field(APIC_TMR);
        pr_debug("... APIC IRR field:\n");
        print_APIC_field(APIC_IRR);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                /* Due to the Pentium erratum 3AP. */
                if (maxlvt > 3)
                        apic_write(APIC_ESR, 0);

                v = apic_read(APIC_ESR);
                pr_debug("... APIC ESR: %08x\n", v);
        }

        icr = apic_icr_read();
        pr_debug("... APIC ICR: %08x\n", (u32)icr);
        pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

        v = apic_read(APIC_LVTT);
        pr_debug("... APIC LVTT: %08x\n", v);

        if (maxlvt > 3) {
                /* PC is LVT#4. */
                v = apic_read(APIC_LVTPC);
                pr_debug("... APIC LVTPC: %08x\n", v);
        }
        v = apic_read(APIC_LVT0);
        pr_debug("... APIC LVT0: %08x\n", v);
        v = apic_read(APIC_LVT1);
        pr_debug("... APIC LVT1: %08x\n", v);

        if (maxlvt > 2) {
                /* ERR is LVT#3. */
                v = apic_read(APIC_LVTERR);
                pr_debug("... APIC LVTERR: %08x\n", v);
        }

        v = apic_read(APIC_TMICT);
        pr_debug("... APIC TMICT: %08x\n", v);
        v = apic_read(APIC_TMCCT);
        pr_debug("... APIC TMCCT: %08x\n", v);
        v = apic_read(APIC_TDCR);
        pr_debug("... APIC TDCR: %08x\n", v);

        if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
                v = apic_read(APIC_EFEAT);
                maxlvt = (v >> 16) & 0xff;
                pr_debug("... APIC EFEAT: %08x\n", v);
                v = apic_read(APIC_ECTRL);
                pr_debug("... APIC ECTRL: %08x\n", v);
                for (i = 0; i < maxlvt; i++) {
                        v = apic_read(APIC_EILVTn(i));
                        pr_debug("... APIC EILVT%d: %08x\n", i, v);
                }
        }
        pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
        int cpu;

        if (!maxcpu)
                return;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu >= maxcpu)
                        break;
                smp_call_function_single(cpu, print_local_APIC, NULL, 1);
        }
        preempt_enable();
}

static void __init print_PIC(void)
{
        unsigned int v;
        unsigned long flags;

        if (!nr_legacy_irqs())
                return;

        pr_debug("\nprinting PIC contents\n");

        raw_spin_lock_irqsave(&i8259A_lock, flags);

        v = inb(0xa1) << 8 | inb(0x21);
        pr_debug("... PIC IMR: %04x\n", v);

        v = inb(0xa0) << 8 | inb(0x20);
        pr_debug("... PIC IRR: %04x\n", v);

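        /*
         * OCW3 command 0x0b selects the In-Service Register for the next
         * reads; 0x0a switches the read register back to the IRR.
         */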
        outb(0x0b, 0xa0);
        outb(0x0b, 0x20);
        v = inb(0xa0) << 8 | inb(0x20);
        outb(0x0a, 0xa0);
        outb(0x0a, 0x20);

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);

        pr_debug("... PIC ISR: %04x\n", v);

        v = inb(0x4d1) << 8 | inb(0x4d0);
        pr_debug("... PIC ELCR: %04x\n", v);
}

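/*
 * "show_lapic=" boot parameter: "show_lapic=all" dumps every online cpu's
 * local APIC, "show_lapic=<N>" limits the dump to the first N cpus.
 */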
static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
        int num = -1;

        if (strcmp(arg, "all") == 0) {
                show_lapic = CONFIG_NR_CPUS;
        } else {
                get_option(&arg, &num);
                if (num >= 0)
                        show_lapic = num;
        }

        return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
        if (apic_verbosity == APIC_QUIET)
                return 0;

        print_PIC();

        /* don't print out if apic is not there */
        if (!cpu_has_apic && !apic_from_smp_config())
                return 0;

        print_local_APICs(show_lapic);
        print_IO_APICs();

        return 0;
}

late_initcall(print_ICs);