/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts.  Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

#include "xen-ops.h"

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays.  The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Packed IRQ information: binding type, sub-type index, and event channel. */
struct packed_irq
{
	unsigned short evtchn;
	unsigned char index;
	unsigned char type;
};

static struct packed_irq irq_info[NR_IRQS];

/* Binding types. */
enum {
	IRQT_UNBOUND,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
static u8 cpu_evtchn[NR_EVENT_CHANNELS];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask.  We do this in a very simple manner, by making a call
 * down into Xen.  The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
EXPORT_SYMBOL_GPL(force_evtchn_callback);

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	return (struct packed_irq) { evtchn, index, type };
}

/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
	return irq_info[irq].evtchn;
}

static inline unsigned int index_from_irq(int irq)
{
	return irq_info[irq].index;
}

static inline unsigned int type_from_irq(int irq)
{
	return irq_info[irq].type;
}

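/*
 * A port is ready for delivery on this cpu only if it is pending, not
 * masked, and routed to this cpu in cpu_evtchn_mask; hence the
 * three-way AND below.
 */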
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask[cpu][idx] &
		~sh->evtchn_mask[idx]);
}

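/*
 * Record that event channel @chn is now delivered to @cpu: update the
 * irq affinity and move the channel's bit from its old cpu's mask to
 * the new one.  This is pure bookkeeping; the hypervisor-side binding
 * is changed via EVTCHNOP_bind_vcpu in rebind_irq_to_cpu().
 */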
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
#endif

	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
	__set_bit(chn, cpu_evtchn_mask[cpu]);

	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	int i;
	/* By default all event channels notify CPU#0. */
	for (i = 0; i < NR_IRQS; i++)
		irq_desc[i].affinity = cpumask_of_cpu(0);
#endif

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore.  Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

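/*
 * Mask/unmask a port in the shared info page.  A masked channel can
 * still become pending, but no upcall is delivered for it until it is
 * unmasked again (see the 'lost edge' handling in unmask_evtchn()).
 */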
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'.  Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

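/*
 * Find a free dynamic irq.  Any irq with a zero bindcount is free;
 * callers hold irq_mapping_update_lock, which keeps the scan and the
 * subsequent bindcount increment atomic.
 */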
static int find_unbound_irq(void)
{
	int irq;

	/* Only allocate from dynirq range. */
	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_bindcount[irq] == 0)
			break;

	if (irq == NR_IRQS)
		panic("No available IRQ to bind to: increase NR_IRQS!\n");

	return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];
	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}


static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

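/*
 * Drop one reference on @irq.  When the last reference goes away, the
 * underlying event channel is closed and the irq is released for
 * reuse.
 */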
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;

		/* Tear down, rather than re-init, the now-unused descriptor. */
		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
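
/*
 * Typical usage from a driver (an illustrative sketch only; my_handler,
 * my_dev and "my-device" are hypothetical names, not part of this file):
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					    "my-device", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */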

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
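
/*
 * As an illustrative example (based on the Xen timer code, which lives
 * elsewhere; the exact flags and handler name may differ), binding a
 * per-cpu timer VIRQ looks roughly like:
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_DISABLED, "timer", NULL);
 */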

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

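/*
 * Send an IPI to a single vcpu by notifying the IPI event channel
 * previously bound to it with bind_ipi_to_irq().
 */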
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}


/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
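/*
 * For example, with BITS_PER_LONG == 64 a pending event on port 130
 * sets bit 2 of vcpu_info->evtchn_pending_sel (130 / 64 == 2) and bit
 * 2 of shared_info->evtchn_pending[2] (130 % 64 == 2); the scan below
 * then recovers port = word_idx * BITS_PER_LONG + bit_idx
 *                    = 2 * 64 + 2 = 130.
 */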
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned long pending_words;

	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
	pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
	while (pending_words != 0) {
		unsigned long pending_bits;
		int word_idx = __ffs(pending_words);
		pending_words &= ~(1UL << word_idx);

		while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
			int bit_idx = __ffs(pending_bits);
			int port = (word_idx * BITS_PER_LONG) + bit_idx;
			int irq = evtchn_to_irq[port];

			if (irq != -1) {
				regs->orig_ax = ~irq;
				do_IRQ(regs);
			}
		}
	}

	put_cpu();
}

/* Rebind an evtchn so that it gets delivered to a specific cpu. */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to the target vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound.
	 * Ignore it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}


static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);
	rebind_irq_to_cpu(irq, tcpu);
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		set_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

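/*
 * The irq chip backing all dynamic Xen irqs.  Every operation is
 * implemented purely in terms of the event-channel bitmaps in the
 * shared info page; no physical interrupt controller is involved.
 */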
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,
	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

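/*
 * Boot-time setup: start with every event channel masked and every
 * dynamic irq unbound, then initialise this cpu's irq context.
 */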
void __init xen_init_IRQ(void)
{
	int i;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* Dynamic IRQ space is currently unbound.  Zero the refcnts. */
	for (i = 0; i < NR_IRQS; i++)
		irq_bindcount[i] = 0;

	irq_ctx_init(smp_processor_id());
}
591 | } |