#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;
static int disable_intremap;
static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);
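
/*
 * Usage example: interrupt remapping can be turned off from the boot
 * loader by appending the parameter to the kernel command line, e.g.
 *
 *	linux /boot/vmlinuz ro root=... nointremap
 *
 * intr_remapping_supported() below then returns 0, so callers fall
 * back to non-remapped interrupt delivery.
 */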
/*
 * Per-IRQ bookkeeping: which IOMMU remaps this IRQ, and where its
 * entry (or block of entries) lives in that IOMMU's remapping table.
 */
struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
};
#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
{
	struct irq_2_iommu *iommu;
	int node;

	node = cpu_to_node(cpu);

	/* Allocate the bookkeeping structure on the CPU's home node */
	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
	printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);

	return iommu;
}
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	if (WARN_ON_ONCE(!desc))
		return NULL;

	return desc->irq_2_iommu;
}
static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	struct irq_2_iommu *irq_iommu;

	/*
	 * Allocate the irq desc if not allocated already.
	 */
	desc = irq_to_desc_alloc_cpu(irq, cpu);
	if (!desc) {
		printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
		return NULL;
	}

	irq_iommu = desc->irq_2_iommu;

	if (!irq_iommu)
		desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
}
#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}
#endif /* CONFIG_GENERIC_HARDIRQS */
static DEFINE_SPINLOCK(irq_2_ir_lock);
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}
int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * Start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
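
/*
 * A minimal sketch of the expected caller pattern (the real callers
 * live in the arch interrupt setup code, e.g. the x86 MSI/IO-APIC
 * paths; the surrounding names here are illustrative, not definitive):
 *
 *	struct intel_iommu *iommu = map_dev_to_ir(pdev);
 *	int index = alloc_irte(iommu, irq, 1);
 *
 *	if (index < 0)
 *		return -1;	// no free IRTE block
 *	// ...compose an IRTE and install it with modify_irte()...
 */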
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	/* Build an Interrupt Entry Cache invalidation descriptor */
	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu = irq_2_iommu_alloc(irq);

	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
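
/*
 * For multi-vector allocations (e.g. several MSI vectors sharing one
 * IRTE block), only the first irq goes through alloc_irte(); each
 * additional irq is tied to the same block with its own sub-handle,
 * roughly (illustrative sketch, not a verbatim caller):
 *
 *	index = alloc_irte(iommu, first_irq, nvec);
 *	for (i = 1; i < nvec; i++)
 *		set_irte_irq(first_irq + i, iommu, index, i);
 *
 * so that irte_index + sub_handle selects the per-vector entry.
 */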
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	/* Update the entry, make it visible, then flush the IEC for it */
	set_64bit((unsigned long *)irte, irte_modified->low);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
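
/*
 * Sketch of how an entry is updated in practice (the field names come
 * from struct irte in intel-iommu.h; the values and the cfg->vector
 * source are illustrative):
 *
 *	struct irte irte;
 *
 *	memset(&irte, 0, sizeof(irte));
 *	irte.present = 1;
 *	irte.vector = cfg->vector;	// hypothetical per-irq config
 *	modify_irte(irq, &irte);
 *
 * modify_irte() takes care of the cache flush and the queued IEC
 * invalidation, so callers only compose the new entry.
 */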
int flush_irte(int irq)
{
	int rc;
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
int free_irte(int irq)
{
	int rc = 0;
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	/* Only the owner of the block (sub_handle 0) clears the entries */
	if (!irq_iommu->sub_handle) {
		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
			set_64bit((unsigned long *)(irte + i), 0);
		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
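
/*
 * Program and enable interrupt remapping on one IOMMU.  The sequence
 * follows the VT-d spec: point DMAR_IRTA_REG at the physical table,
 * latch it with GCMD.SIRTP, globally invalidate the interrupt entry
 * cache, and only then set GCMD.IRE to start remapping.
 */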
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of the interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
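
/*
 * Allocate the remapping table itself.  With the constants in this
 * kernel's intel-iommu.h, the table holds INTR_REMAP_TABLE_ENTRIES
 * (64K) entries of 16 bytes each, i.e. 1MB, which is what an
 * allocation of order INTR_REMAP_PAGE_ORDER (8) provides on 4K-page
 * systems: 2^8 pages * 4096 bytes = 1MB.
 */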
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;	/* don't leave a dangling pointer */
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);

	return 0;
}
/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * Global invalidation of the interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);	/* GSTS is 32-bit */
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If queued invalidation is already initialized, we
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * Check for interrupt-remapping support.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_enable_qi(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
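
/*
 * The expected boot-time call sequence, from the x86 APIC setup code,
 * is roughly (a sketch of the callers, not a definitive trace):
 *
 *	if (intr_remapping_supported())
 *		enable_intr_remapping(eim);	// eim: extended (x2APIC) mode
 *
 * together with parse_ioapics_under_ir() below, which binds each
 * IO-APIC to its remapping hardware unit.
 */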
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		start += scope->length;
	}

	return 0;
}
/*
 * Finds the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APIC's listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}
void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}
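
/*
 * disable_intr_remapping()/reenable_intr_remapping() pair up around
 * suspend/resume: the suspend path quiesces remapping on every unit,
 * and resume reprograms the still-allocated tables, e.g.:
 *
 *	disable_intr_remapping();
 *	// ...system suspends and resumes...
 *	reenable_intr_remapping(x2apic_mode_flag);	// illustrative arg
 */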
int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for this iommu. */
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}