// SPDX-License-Identifier: GPL-2.0-only
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

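/*
 * Per interrupt data for the vector domain:
 *
 * @hw_irq_cfg:		Hardware facing configuration (vector and
 *			destination APIC ID)
 * @vector/@cpu:	Currently assigned vector and target CPU
 * @prev_vector/@prev_cpu: Previous vector/CPU while a move is in flight
 * @irq:		Linux interrupt number
 * @clist:		Entry in the per CPU vector cleanup list
 */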
struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP

static void vector_cleanup_callback(struct timer_list *tmr);

struct vector_cleanup {
	struct hlist_head	head;
	struct timer_list	timer;
};

static DEFINE_PER_CPU(struct vector_cleanup, vector_cleanup) = {
	.head	= HLIST_HEAD_INIT,
	.timer	= __TIMER_INITIALIZER(vector_cleanup_callback, TIMER_PINNED),
};
#endif

void lock_vector_lock(void)
{
	/*
	 * Used so that the online set of CPUs does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

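/*
 * Walk up the hierarchical irqdomain stack to the top level, where the
 * vector domain's per interrupt data lives, and return it.
 */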
static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}

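/*
 * Commit a (vector, cpu) pair to the hardware facing irq_cfg: compute the
 * destination APIC ID for @cpu and record the effective affinity. Must be
 * called with vector_lock held.
 */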
static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}

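/*
 * Install @newvec/@newcpu as the new target of @irqd and deal with the
 * previous vector: either park it for cleanup once the first interrupt
 * arrives on the new vector, or free it immediately if its target CPU is
 * offline.
 */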
static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/*
	 * If there is no vector associated or if the associated vector is
	 * the shutdown vector, which is associated to make PCI/MSI
	 * shutdown mode work, then there is nothing to release. Clear out
	 * prev_vector for this and the offlined target case.
	 */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;
	/*
	 * If the target CPU of the previous vector is online, then mark
	 * the vector as move in progress and store it for cleanup when the
	 * first interrupt on the new vector arrives. If the target CPU is
	 * offline then the regular release mechanism via the cleanup
	 * vector is not possible and the vector can be immediately freed
	 * in the underlying matrix allocator.
	 */
	if (cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
		WARN_ON_ONCE(apicd->cpu == newcpu);
	} else {
		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
				managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}

static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	irqd_set_can_reserve(irqd);
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}

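/*
 * Allocate a vector for @irqd targeted at one CPU in @dest via the matrix
 * allocator and commit the result to the hardware configuration. Must be
 * called with vector_lock held.
 */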
static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	/*
	 * Careful here. @apicd might either have move_in_progress set or
	 * be enqueued for cleanup. Assigning a new vector would either
	 * leave a stale vector on some CPU around or in case of a pending
	 * cleanup corrupt the hlist.
	 */
	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
		return -EBUSY;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);

	return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

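/*
 * Try increasingly wider target sets until a vector is found: first the
 * intersection of the affinity mask and the home node, then the full
 * affinity mask, then the node mask, and finally all online CPUs.
 */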
static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node != NUMA_NO_NODE) {
		/* Try the intersection of @affmsk and node mask */
		cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
		if (!assign_vector_locked(irqd, vector_searchmask))
			return 0;
	}

	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;

	if (node != NUMA_NO_NODE) {
		/* Try the node mask */
		if (!assign_vector_locked(irqd, cpumask_of_node(node)))
			return 0;
	}

	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}

static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, dest, affmsk);

	/* set_affinity might call here for nothing */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
					  &cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}

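/*
 * Release the current vector of @irqd and, if a move is in flight, the
 * previous one as well. Must be called with vector_lock held.
 */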
static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up move in progress */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}

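/*
 * On deactivation, release the vector and drop back into reservation mode
 * (can_reserve) or managed shutdown mode. Regular fixed assigned
 * interrupts and interrupts which only hold a global reservation are left
 * alone.
 */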
static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt has a global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret) {
		apicd->has_reserved = false;
		/*
		 * Core might have disabled reservation mode after
		 * allocating the irq descriptor. Ideally this should
		 * happen before allocation time, but that would require
		 * completely convoluted ways of transporting that
		 * information.
		 */
		if (!irqd_can_reserve(irqd))
			apicd->can_reserve = false;
	}

	/*
	 * Check to ensure that the effective affinity mask is a subset
	 * of the user-supplied affinity mask, and warn the user if it is not.
	 */
	if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
			    irq_data_get_affinity_mask(irqd))) {
		pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
			irqd->irq);
	}

	return ret;
}

static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered. Handle
	 * it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}

static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool reserve)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, reserve);

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (!apicd->can_reserve && !apicd->is_managed)
		assign_irq_vector_any_locked(irqd);
	else if (reserve || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}

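/*
 * Legacy PIC interrupts come with a fixed vector. Keep an already
 * activated interrupt (typically the timer) on that vector; otherwise
 * release the vector and return true so the caller reallocates it.
 */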
static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		irqd_set_can_reserve(irqd);
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	if (apic_is_disabled)
		return -ENXIO;

	/*
	 * Catch any attempt to touch the cascade interrupt on a PIC
	 * equipped system.
	 */
	if (WARN_ON_ONCE(info->flags & X86_IRQ_ALLOC_LEGACY &&
			 virq == PIC_CASCADE_IR))
		return -EINVAL;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Prevent any of these interrupts from being invoked in
		 * non-interrupt context via e.g. generic_handle_irq(),
		 * as that can corrupt the affinity move state.
		 */
		irqd_set_handle_enforce_irqctx(irqd);

		/* Don't invoke affinity setter on deactivated interrupts */
		irqd_set_affinity_on_activate(irqd);

		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err) {
			irqd->chip_data = NULL;
			free_apic_chip_data(apicd);
			goto error;
		}
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
				  struct irq_data *irqd, int ind)
{
	struct apic_chip_data apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	if (!irqd->chip_data) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	memcpy(&apicd, irqd->chip_data, sizeof(apicd));
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
	if (apicd.prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
	}
	seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
	seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
	seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
	seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
	seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif

int x86_fwspec_is_ioapic(struct irq_fwspec *fwspec)
{
	if (fwspec->param_count != 1)
		return 0;

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		const char *fwname = fwnode_get_name(fwspec->fwnode);
		return fwname && !strncmp(fwname, "IO-APIC-", 8) &&
			simple_strtol(fwname+8, NULL, 10) == fwspec->param[0];
	}
	return to_of_node(fwspec->fwnode) &&
	       of_device_is_compatible(to_of_node(fwspec->fwnode),
				       "intel,ce4100-ioapic");
}

int x86_fwspec_is_hpet(struct irq_fwspec *fwspec)
{
	if (fwspec->param_count != 1)
		return 0;

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		const char *fwname = fwnode_get_name(fwspec->fwnode);
		return fwname && !strncmp(fwname, "HPET-MSI-", 9) &&
			simple_strtol(fwname+9, NULL, 10) == fwspec->param[0];
	}
	return 0;
}

static int x86_vector_select(struct irq_domain *d, struct irq_fwspec *fwspec,
			     enum irq_domain_bus_token bus_token)
{
	/*
	 * HPET and I/OAPIC cannot be parented in the vector domain
	 * if IRQ remapping is enabled. APIC IDs above 15 bits are
	 * only permitted if IRQ remapping is enabled, so check that.
	 */
	if (apic_id_valid(32768))
		return 0;

	return x86_fwspec_is_ioapic(fwspec) || x86_fwspec_is_hpet(fwspec);
}

static const struct irq_domain_ops x86_vector_domain_ops = {
	.select		= x86_vector_select,
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (irq_get_nr_irqs() > NR_VECTORS * nr_cpu_ids)
		irq_set_nr_irqs(NR_VECTORS * nr_cpu_ids);

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
	/* Room for MSI and HT dynamic interrupts */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < irq_get_nr_irqs())
		irq_set_nr_irqs(nr);

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign system here so it won't get accounted as allocated
	 * and movable in the cpu hotplug check and it prevents managed
	 * irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_update_legacy_vectors(void)
{
	unsigned int i;

	if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
		return;

	/*
	 * If the IO/APIC is disabled via config, kernel command line or
	 * lack of enumeration then all legacy interrupts are routed
	 * through the PIC. Make sure that they are marked as legacy
	 * vectors. PIC_CASCADE_IR has already been marked in
	 * lapic_assign_system_vectors().
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			lapic_assign_legacy_vector(i, true);
	}
}

void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector;

	for_each_set_bit(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved, bring the matrix online */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		/*
		 * Don't touch the cascade interrupt. It's unusable
		 * on PIC equipped machines. See the large comment
		 * in the IO/APIC code.
		 */
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}

int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit the
	 * search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exceptions are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non legacy interrupts can be
	 * cleared.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr);

void lapic_offline(void)
{
	struct vector_cleanup *cl = this_cpu_ptr(&vector_cleanup);

	lock_vector_lock();

	/* In case the vector cleanup timer has not expired */
	__vector_cleanup(cl, false);

	irq_matrix_offline(vector_matrix);
	WARN_ON_ONCE(timer_delete_sync_try(&cl->timer) < 0);
	WARN_ON_ONCE(!hlist_empty(&cl->head));

	unlock_vector_lock();
}

static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	int err;

	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
		return -EIO;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}

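/*
 * Release the previous vector of a completed or aborted move and clear
 * the move state. Must be called with vector_lock held.
 */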
static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * Managed interrupts are usually not migrated away
	 * from an online CPU, but CPU isolation 'managed_irq'
	 * can make that happen.
	 * 1) Activation does not take the isolation into account
	 *    to keep the code simple
	 * 2) Migration away from an isolated CPU can happen when
	 *    a non-isolated CPU which is in the calculated
	 *    affinity mask comes online.
	 */
	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
static void apic_force_complete_move(struct irq_data *irqd)
{
	unsigned int cpu = smp_processor_id();
	struct apic_chip_data *apicd;
	unsigned int vector;

	guard(raw_spinlock)(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		return;

	/*
	 * If prev_vector is empty or the descriptor is neither currently
	 * nor previously on the outgoing CPU, no action is required.
	 */
	vector = apicd->prev_vector;
	if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
		return;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
}

#else
# define apic_set_affinity		NULL
# define apic_force_complete_move	NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	__apic_send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

void apic_ack_irq(struct irq_data *irqd)
{
	irq_move_irq(irqd);
	apic_eoi();
}

void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	apic_ack_irq(irqd);
}

static void x86_vector_msi_compose_msg(struct irq_data *data,
				       struct msi_msg *msg)
{
	__irq_msi_compose_msg(irqd_cfg(data), msg, false);
}

static struct irq_chip lapic_controller = {
	.name				= "APIC",
	.irq_ack			= apic_ack_edge,
	.irq_set_affinity		= apic_set_affinity,
	.irq_compose_msi_msg		= x86_vector_msi_compose_msg,
	.irq_force_complete_move	= apic_force_complete_move,
	.irq_retrigger			= apic_retrigger_irq,
};

#ifdef CONFIG_SMP

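/*
 * Walk the per-CPU cleanup list and release every vector whose move has
 * completed. Vectors still pending in the local APIC IRR stay queued and
 * the cleanup timer is rearmed. Must be called with vector_lock held.
 */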
static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr)
{
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;
	bool rearm = false;

	lockdep_assert_held(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, &cl->head, clist) {
		unsigned int vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered at the APICs IRR. That's clearly a
		 * hardware issue if the vector arrived on the old target
		 * _after_ interrupts were disabled above. Keep @apicd
		 * on the list and schedule the timer again to give the CPU
		 * a chance to handle the pending interrupt.
		 *
		 * Do not check IRR when called from lapic_offline(), because
		 * fixup_irqs() was just called to scan IRR for set bits and
		 * forward them to new destination CPUs via IPIs.
		 */
		if (check_irr && is_vector_pending(vector)) {
			pr_warn_once("Moved interrupt pending in old target APIC %u\n", apicd->irq);
			rearm = true;
			continue;
		}
		free_moved_vector(apicd);
	}

	/*
	 * Must happen under vector_lock to make the timer_pending() check
	 * in __vector_schedule_cleanup() race free against the rearm here.
	 */
	if (rearm)
		mod_timer(&cl->timer, jiffies + 1);
}

static void vector_cleanup_callback(struct timer_list *tmr)
{
	struct vector_cleanup *cl = container_of(tmr, typeof(*cl), timer);

	/* Prevent vectors vanishing under us */
	raw_spin_lock_irq(&vector_lock);
	__vector_cleanup(cl, true);
	raw_spin_unlock_irq(&vector_lock);
}

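/*
 * Queue the moved vector on the previous target CPU's cleanup list and
 * arm the cleanup timer there. If that CPU went offline in the meantime
 * the vector is freed immediately.
 */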
static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
{
	unsigned int cpu = apicd->prev_cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	if (cpu_online(cpu)) {
		struct vector_cleanup *cl = per_cpu_ptr(&vector_cleanup, cpu);

		hlist_add_head(&apicd->clist, &cl->head);

		/*
		 * The lockless timer_pending() check is safe here. If it
		 * returns true, then the callback will observe this new
		 * apic data in the hlist as everything is serialized by
		 * vector lock.
		 *
		 * If it returns false then the timer is either not armed
		 * or the other CPU executes the callback, which again
		 * would be blocked on vector lock. Rearming it in the
		 * latter case makes it fire for nothing.
		 *
		 * This is also safe against the callback rearming the timer
		 * because that's serialized via vector lock too.
		 */
		if (!timer_pending(&cl->timer)) {
			cl->timer.expires = jiffies + 1;
			add_timer_on(&cl->timer, cpu);
		}
	} else {
		pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu);
		free_moved_vector(apicd);
	}
	raw_spin_unlock(&vector_lock);
}

void vector_schedule_cleanup(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__vector_schedule_cleanup(apicd);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	/*
	 * If the interrupt arrived on the new target CPU, cleanup the
	 * vector on the old target CPU. A vector check is not required
	 * because an interrupt can never move from one vector to another
	 * on the same CPU.
	 */
	if (apicd->cpu == smp_processor_id())
		__vector_schedule_cleanup(apicd);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but it is good enough to
 * prevent the actual interrupt move from running out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif /* HOTPLUG_CPU */
#endif /* SMP */

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), read_apic_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

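/*
 * Dump the i8259 PIC state. The OCW3 commands written below (0x0b and
 * 0x0a) select the ISR and IRR respectively for the subsequent reads
 * from the command ports.
 */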
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC  IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC  ISR: %04x\n", v);

	v = inb(PIC_ELCR2) << 8 | inb(PIC_ELCR1);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);