// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt)     "DMAR-IR: " fmt

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>

#include "iommu.h"
#include "../irq_remapping.h"
#include "cap_audit.h"

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

enum irq_mode {
	IRQ_REMAPPING,
	IRQ_POSTING,
};

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8 irte_mask;
	enum irq_mode mode;
};

struct intel_ir_data {
	struct irq_2_iommu	irq_2_iommu;
	struct irte		irte_entry;
	union {
		struct msi_msg	msi_entry;
	};
};

#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];

/*
 * Lock ordering:
 *	->iommu->register_lock
 *
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is no
 * need to take the dmar_global_lock.
 */
DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static const struct irq_domain_ops intel_ir_domain_ops;

static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
static int __init parse_ioapics_under_ir(void);
static const struct msi_parent_ops dmar_msi_parent_ops, virt_dmar_msi_parent_ops;

static bool ir_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
}

static void clear_ir_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static void init_ir_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_IRES)
		iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}
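
/*
 * Allocate @count IRTEs from @iommu's remapping table, rounded up to a
 * power of two so the block can be addressed via a subhandle mask. On
 * success the allocation is recorded in @irq_iommu and the starting
 * index is returned; on failure a negative value is returned.
 */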
static int alloc_irte(struct intel_iommu *iommu,
		      struct irq_2_iommu *irq_iommu, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		pr_err("Requested mask %x exceeds the max invalidation handle mask value %Lx\n",
		       mask, ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
		irq_iommu->mode = IRQ_REMAPPING;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
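
/*
 * Queue an Interrupt Entry Cache invalidation descriptor and wait for
 * the hardware to complete it, so stale copies of updated IRTEs are
 * never used to deliver an interrupt.
 */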
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.qw0 = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		 | QI_IEC_SELECTIVE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	return qi_submit_sync(iommu, &desc, 1, 0);
}
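
/*
 * Update an IRTE in place and flush the entry from the IEC. Entries in
 * (or entering) posted format are updated with a 128-bit cmpxchg so the
 * 'pda' field, which straddles the two 64-bit halves, is never observed
 * torn by hardware.
 */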
static int modify_irte(struct irq_2_iommu *irq_iommu,
		       struct irte *irte_modified)
{
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	if ((irte->pst == 1) || (irte_modified->pst == 1)) {
		bool ret;

		ret = cmpxchg_double(&irte->low, &irte->high,
				     irte->low, irte->high,
				     irte_modified->low, irte_modified->high);
		/*
		 * We use cmpxchg16 to atomically update the 128-bit IRTE,
		 * and it cannot be updated by the hardware or other processors
		 * behind us, so the return value of cmpxchg16 should be the
		 * same as the old value.
		 */
		WARN_ON(!ret);
	} else {
		WRITE_ONCE(irte->low, irte_modified->low);
		WRITE_ONCE(irte->high, irte_modified->high);
	}
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);

	/* Update iommu mode according to the IRTE mode */
	irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_iommu(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
			return ir_hpet[i].iommu;
	}
	return NULL;
}

static struct intel_iommu *map_ioapic_to_iommu(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
			return ir_ioapic[i].iommu;
	}
	return NULL;
}

static struct irq_domain *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(dev);

	return drhd ? drhd->iommu->ir_domain : NULL;
}
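
/*
 * Zero and release the whole block of IRTEs that @irq_iommu belongs to.
 * Calls with a non-zero sub_handle are no-ops, since the block is owned
 * by the head entry.
 */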
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		WRITE_ONCE(entry->low, 0);
		WRITE_ONCE(entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0	/* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1	/* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2	/* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0	/* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1	/* verify most significant 13 bits, ignore
				 * the third least significant bit
				 */
#define SQ_13_IGNORE_2	0x2	/* verify most significant 13 bits, ignore
				 * the second and third least significant bits
				 */
#define SQ_13_IGNORE_3	0x3	/* verify most significant 13 bits, ignore
				 * the three least significant bits
				 */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

/*
 * Set an IRTE to match only the bus number. Interrupt requests that reference
 * this IRTE must have a requester-id whose bus number is between start_bus
 * and end_bus, inclusive.
 */
static void set_irte_verify_bus(struct irte *irte, unsigned int start_bus,
				unsigned int end_bus)
{
	set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
		     (start_bus << 8) | end_bus);
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
	int count;
	int busmatch_count;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias))
		data->busmatch_count++;

	data->pdev = pdev;
	data->alias = alias;
	data->count++;

	return 0;
}
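
/*
 * Program the source-id check for an MSI-capable device. Walks the
 * device's DMA aliases to decide whether the requester-id can be
 * matched exactly or only by bus number; the comment in the function
 * body explains the individual cases.
 */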
static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	data.count = 0;
	data.busmatch_count = 0;
	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * The DMA alias walk provides us with a PCI device and alias. The
	 * only case where it will return an alias on a different bus than
	 * the device is the case of a PCIe-to-PCI bridge, where the alias
	 * is for the subordinate bus. In this case we can only verify the bus.
	 *
	 * If there are multiple aliases, all with the same bus number,
	 * then all we can do is verify the bus. This is typical in NTB
	 * hardware which uses proxy IDs, where the device will generate
	 * traffic from multiple devfn numbers on the same bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology-based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID. Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_verify_bus(irte, PCI_BUS_NUM(data.alias),
				    dev->bus->number);
	else if (data.count >= 2 && data.busmatch_count == data.count)
		set_irte_verify_bus(irte, dev->bus->number, dev->bus->number);
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     pci_dev_id(dev));

	return 0;
}
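
/*
 * In a kdump kernel, inherit the IRTEs that the crashed kernel left
 * behind: map the old table, copy it over the new one and mark every
 * present entry as allocated so it is not handed out again.
 */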
static int iommu_load_old_irte(struct intel_iommu *iommu)
{
	struct irte *old_ir_table;
	phys_addr_t irt_phys;
	unsigned int i;
	size_t size;
	u64 irta;

	/* Check whether the old ir-table has the same size as ours */
	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
	     != INTR_REMAP_TABLE_REG_SIZE)
		return -EINVAL;

	irt_phys = irta & VTD_PAGE_MASK;
	size = INTR_REMAP_TABLE_ENTRIES * sizeof(struct irte);

	/* Map the old IR table */
	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
	if (!old_ir_table)
		return -ENOMEM;

	/* Copy data over */
	memcpy(iommu->ir_table->base, old_ir_table, size);

	__iommu_flush_cache(iommu, iommu->ir_table->base, size);

	/*
	 * Now check the table for used entries and mark those as
	 * allocated in the bitmap
	 */
	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
		if (iommu->ir_table->base[i].present)
			bitmap_set(iommu->ir_table->bitmap, i, 1);
	}

	memunmap(old_ir_table);

	return 0;
}
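
/*
 * Point the hardware at the interrupt remapping table by programming
 * IRTA and latching it with the SIRTP command, then invalidate the IEC
 * unless the hardware invalidates it itself on table pointer updates
 * (ESIRTPS).
 */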
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    addr | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of interrupt entry cache to make sure the
	 * hardware uses the new irq remapping table.
	 */
	if (!cap_esirtps(iommu->cap))
		qi_global_iec(iommu);
}

static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/* Block compatibility-format MSIs */
	if (sts & DMA_GSTS_CFIS) {
		iommu->gcmd &= ~DMA_GCMD_CFI;
		writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
		IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			      readl, !(sts & DMA_GSTS_CFIS), sts);
	}

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status. Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
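
/*
 * Per-IOMMU setup: allocate the remapping table and its allocation
 * bitmap, create the IR irqdomain as an MSI parent, make sure queued
 * invalidation is running, and either inherit a pre-enabled table from
 * the previous (kdump) kernel or program a fresh one.
 */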
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct fwnode_handle *fn;
	unsigned long *bitmap;
	struct page *pages;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
	if (!fn)
		goto out_free_bitmap;

	iommu->ir_domain =
		irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
					    0, INTR_REMAP_TABLE_ENTRIES,
					    fn, &intel_ir_domain_ops,
					    iommu);
	if (!iommu->ir_domain) {
		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
		goto out_free_fwnode;
	}

	irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_DMAR);
	iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;

	if (cap_caching_mode(iommu->cap))
		iommu->ir_domain->msi_parent_ops = &virt_dmar_msi_parent_ops;
	else
		iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops;

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;

	/*
	 * If queued invalidation is already initialized,
	 * we shouldn't disable it.
	 */
	if (!iommu->qi) {
		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);
		dmar_disable_qi(iommu);

		if (dmar_enable_qi(iommu)) {
			pr_err("Failed to enable queued invalidation\n");
			goto out_free_ir_domain;
		}
	}

	init_ir_status(iommu);

	if (ir_pre_enabled(iommu)) {
		if (!is_kdump_kernel()) {
			pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
				iommu->name);
			clear_ir_pre_enabled(iommu);
			iommu_disable_irq_remapping(iommu);
		} else if (iommu_load_old_irte(iommu))
			pr_err("Failed to copy IR table for %s from previous kernel\n",
			       iommu->name);
		else
			pr_info("Copied IR table for %s from previous kernel\n",
				iommu->name);
	}

	iommu_set_irq_remapping(iommu, eim_mode);

	return 0;

out_free_ir_domain:
	irq_domain_remove(iommu->ir_domain);
	iommu->ir_domain = NULL;
out_free_fwnode:
	irq_domain_free_fwnode(fn);
out_free_bitmap:
	bitmap_free(bitmap);
out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);

	iommu->ir_table = NULL;

	return -ENOMEM;
}

static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
	struct fwnode_handle *fn;

	if (iommu && iommu->ir_table) {
		if (iommu->ir_domain) {
			fn = iommu->ir_domain->fwnode;

			irq_domain_remove(iommu->ir_domain);
			irq_domain_free_fwnode(fn);
			iommu->ir_domain = NULL;
		}
		free_pages((unsigned long)iommu->ir_table->base,
			   INTR_REMAP_PAGE_ORDER);
		bitmap_free(iommu->ir_table->bitmap);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;
	}
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	if (!cap_esirtps(iommu->cap))
		qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static void __init intel_cleanup_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (ecap_ir_support(iommu->ecap)) {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
		}
	}

	if (x2apic_supported())
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
}

static int __init intel_prepare_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int eim = 0;

	if (irq_remap_broken) {
		pr_warn("This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable. To maintain system stability\n"
			"interrupt remapping is being disabled. Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -ENODEV;
	}

	if (dmar_table_init() < 0)
		return -ENODEV;

	if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
		return -ENODEV;

	if (!dmar_ir_support())
		return -ENODEV;

	if (parse_ioapics_under_ir()) {
		pr_info("Not enabling interrupt remapping\n");
		goto error;
	}

	/* First make sure all IOMMUs support IRQ remapping */
	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			goto error;

	/* Detect remapping mode: lapic or x2apic */
	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		if (!eim) {
			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.\n");
			pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
		}
	}

	for_each_iommu(iommu, drhd) {
		if (eim && !ecap_eim_support(iommu->ecap)) {
			pr_info("%s does not support EIM\n", iommu->name);
			eim = 0;
		}
	}

	eim_mode = eim;
	if (eim)
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

	/* Do the initializations early */
	for_each_iommu(iommu, drhd) {
		if (intel_setup_irq_remapping(iommu)) {
			pr_err("Failed to setup irq remapping for %s\n",
			       iommu->name);
			goto error;
		}
	}

	return 0;

error:
	intel_cleanup_irq_remapping();
	return -ENODEV;
}

/*
 * Set Posted-Interrupts capability.
 */
static inline void set_irq_posting_cap(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (!disable_irq_post) {
		/*
		 * If the IRTE is in posted format, the 'pda' field goes
		 * across a 64-bit boundary, so cmpxchg16b is needed to update
		 * it atomically. We only expose posted interrupts when
		 * X86_FEATURE_CX16 is supported. In practice, hardware
		 * platforms supporting PI should have X86_FEATURE_CX16
		 * support; this has been confirmed with Intel hardware
		 * engineers.
		 */
		if (boot_cpu_has(X86_FEATURE_CX16))
			intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;

		for_each_iommu(iommu, drhd)
			if (!cap_pi_support(iommu->cap)) {
				intel_irq_remap_ops.capability &=
						~(1 << IRQ_POSTING_CAP);
				break;
			}
	}
}

static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool setup = false;

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ir_pre_enabled(iommu))
			iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	set_irq_posting_cap();

	pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");

	return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	intel_cleanup_irq_remapping();
	return -1;
}

static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				   struct intel_iommu *iommu,
				   struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_HPET_TBS; count++) {
		if (ir_hpet[count].iommu == iommu &&
		    ir_hpet[count].id == scope->enumeration_id)
			return 0;
		else if (ir_hpet[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max HPET blocks\n");
		return -ENOSPC;
	}

	ir_hpet[free].iommu = iommu;
	ir_hpet[free].id    = scope->enumeration_id;
	ir_hpet[free].bus   = bus;
	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
	pr_info("HPET id %d under DRHD base 0x%Lx\n",
		scope->enumeration_id, drhd->address);

	return 0;
}

static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_IO_APICS; count++) {
		if (ir_ioapic[count].iommu == iommu &&
		    ir_ioapic[count].id == scope->enumeration_id)
			return 0;
		else if (ir_ioapic[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max IO APICS\n");
		return -ENOSPC;
	}

	ir_ioapic[free].bus = bus;
	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[free].iommu = iommu;
	ir_ioapic[free].id = scope->enumeration_id;
	pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
		scope->enumeration_id, drhd->address, iommu->seq_id);

	return 0;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	int ret = 0;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end && ret == 0) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
		start += scope->length;
	}

	return ret;
}

static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].iommu == iommu)
			ir_hpet[i].iommu = NULL;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].iommu == iommu)
			ir_ioapic[i].iommu = NULL;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool ir_supported = false;
	int ioapic_idx;

	for_each_iommu(iommu, drhd) {
		int ret;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
		if (ret)
			return ret;

		ir_supported = true;
	}

	if (!ir_supported)
		return -ENODEV;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);

		if (!map_ioapic_to_iommu(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 0;
}

static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}

	/*
	 * Clear Posted-Interrupts capability.
	 */
	if (!disable_irq_post)
		intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP);
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	bool setup = false;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	set_irq_posting_cap();

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

/*
 * Store the MSI remapping domain pointer in the device if enabled.
 *
 * This is called from dmar_pci_bus_add_dev() so it works even when DMA
 * remapping is disabled. Only update the pointer if the device is not
 * already handled by a non default PCI/MSI interrupt domain. This protects
 * e.g. VMD devices.
 */
void intel_irq_remap_add_device(struct dmar_pci_notify_info *info)
{
	if (!irq_remapping_enabled || !pci_dev_has_default_msi_parent_domain(info->dev))
		return;

	dev_set_msi_domain(&info->dev->dev, map_dev_to_ir(info->dev));
}
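
/*
 * Fill in the common fields of a remapped-format IRTE for the given
 * vector and destination APIC id; the source-id validation fields are
 * set by the callers afterwards.
 */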
static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->dest_mode_logical;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be set up in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.prepare		= intel_prepare_irq_remapping,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
};

static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
{
	struct intel_ir_data *ir_data = irqd->chip_data;
	struct irte *irte = &ir_data->irte_entry;
	struct irq_cfg *cfg = irqd_cfg(irqd);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	irte->vector = cfg->vector;
	irte->dest_id = IRTE_DEST(cfg->dest_apicid);

	/* Update the hardware only if the interrupt is in remapped mode. */
	if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
		modify_irte(&ir_data->irq_2_iommu, irte);
}

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE and a flush of the
 * hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the cpu will come from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct irq_data *parent = data->parent_data;
	struct irq_cfg *cfg = irqd_cfg(data);
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	intel_ir_reconfigure_irte(data, false);
	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, it is time to clean up the previous
	 * vector allocation.
	 */
	send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_DONE;
}

static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
				     struct msi_msg *msg)
{
	struct intel_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}
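
/*
 * Switch an IRTE between remapped and posted mode on behalf of a VMM.
 * A NULL @info reinstates the cached remapped entry; otherwise the
 * posted-descriptor address and notification vector from @info are
 * folded into a posted-format copy of that entry.
 */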
static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{
	struct intel_ir_data *ir_data = data->chip_data;
	struct vcpu_data *vcpu_pi_info = info;

	/* stop posting interrupts, back to remapping mode */
	if (!vcpu_pi_info) {
		modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
	} else {
		struct irte irte_pi;

		/*
		 * We are not caching the posted interrupt entry. We
		 * copy the data from the remapped entry and modify
		 * the fields which are relevant for posted mode. The
		 * cached remapped entry is used for switching back to
		 * remapped mode.
		 */
		memset(&irte_pi, 0, sizeof(irte_pi));
		dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);

		/* Update the posted mode fields */
		irte_pi.p_pst = 1;
		irte_pi.p_urgent = 0;
		irte_pi.p_vector = vcpu_pi_info->vector;
		irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
				(32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
		irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
				~(-1UL << PDA_HIGH_BIT);

		modify_irte(&ir_data->irq_2_iommu, &irte_pi);
	}

	return 0;
}

static struct irq_chip intel_ir_chip = {
	.name			= "INTEL-IR",
	.irq_ack		= apic_ack_irq,
	.irq_set_affinity	= intel_ir_set_affinity,
	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
	.irq_set_vcpu_affinity	= intel_ir_set_vcpu_affinity,
};
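
/*
 * Compose the remappable-format MSI message: the address encodes the
 * IRTE index (with the interrupt-format bit set), the data carries only
 * the subhandle.
 */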
static void fill_msi_msg(struct msi_msg *msg, u32 index, u32 subhandle)
{
	memset(msg, 0, sizeof(*msg));

	msg->arch_addr_lo.dmar_base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->arch_addr_lo.dmar_subhandle_valid = true;
	msg->arch_addr_lo.dmar_format = true;
	msg->arch_addr_lo.dmar_index_0_14 = index & 0x7FFF;
	msg->arch_addr_lo.dmar_index_15 = !!(index & 0x8000);

	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;

	msg->arch_data.dmar_subhandle = subhandle;
}

static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
					     struct irq_cfg *irq_cfg,
					     struct irq_alloc_info *info,
					     int index, int sub_handle)
{
	struct irte *irte = &data->irte_entry;

	prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		/* Set source-id of interrupt request */
		set_ioapic_sid(irte, info->devid);
		apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
			    info->devid, irte->present, irte->fpd,
			    irte->dst_mode, irte->redir_hint,
			    irte->trigger_mode, irte->dlvry_mode,
			    irte->avail, irte->vector, irte->dest_id,
			    irte->sid, irte->sq, irte->svt);
		sub_handle = info->ioapic.pin;
		break;
	case X86_IRQ_ALLOC_TYPE_HPET:
		set_hpet_sid(irte, info->devid);
		break;
	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
		set_msi_sid(irte,
			    pci_real_dma_dev(msi_desc_to_pci_dev(info->desc)));
		break;
	default:
		BUG_ON(1);
		break;
	}
	fill_msi_msg(&data->msi_entry, index, sub_handle);
}

static void intel_free_irq_resources(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct intel_ir_data *data;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			data = irq_data->chip_data;
			irq_iommu = &data->irq_2_iommu;
			raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
			clear_entries(irq_iommu);
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			irq_domain_reset_irq_data(irq_data);
			kfree(data);
		}
	}
}
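
/*
 * Allocate parent (vector) resources and a block of IRTEs for @nr_irqs
 * interrupts. The first descriptor owns the block; the others get a
 * copy of its irq_2_iommu with their own sub_handle.
 */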
static int intel_irq_remapping_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *arg)
{
	struct intel_iommu *iommu = domain->host_data;
	struct irq_alloc_info *info = arg;
	struct intel_ir_data *data, *ird;
	struct irq_data *irq_data;
	struct irq_cfg *irq_cfg;
	int i, ret, index;

	if (!info || !iommu)
		return -EINVAL;
	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_free_parent;

	down_read(&dmar_global_lock);
	index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
	up_read(&dmar_global_lock);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		kfree(data);
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		irq_cfg = irqd_cfg(irq_data);
		if (!irq_data || !irq_cfg) {
			if (!i)
				kfree(data);
			ret = -EINVAL;
			goto out_free_data;
		}

		if (i > 0) {
			ird = kzalloc(sizeof(*ird), GFP_KERNEL);
			if (!ird)
				goto out_free_data;
			/* Initialize the common data */
			ird->irq_2_iommu = data->irq_2_iommu;
			ird->irq_2_iommu.sub_handle = i;
		} else {
			ird = data;
		}

		irq_data->hwirq = (index << 16) + i;
		irq_data->chip_data = ird;
		irq_data->chip = &intel_ir_chip;
		intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
	}
	return 0;

out_free_data:
	intel_free_irq_resources(domain, virq, i);
out_free_parent:
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
	return ret;
}

static void intel_irq_remapping_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	intel_free_irq_resources(domain, virq, nr_irqs);
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static int intel_irq_remapping_activate(struct irq_domain *domain,
					struct irq_data *irq_data, bool reserve)
{
	intel_ir_reconfigure_irte(irq_data, true);
	return 0;
}

static void intel_irq_remapping_deactivate(struct irq_domain *domain,
					   struct irq_data *irq_data)
{
	struct intel_ir_data *data = irq_data->chip_data;
	struct irte entry;

	memset(&entry, 0, sizeof(entry));
	modify_irte(&data->irq_2_iommu, &entry);
}

static int intel_irq_remapping_select(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      enum irq_domain_bus_token bus_token)
{
	struct intel_iommu *iommu = NULL;

	if (x86_fwspec_is_ioapic(fwspec))
		iommu = map_ioapic_to_iommu(fwspec->param[0]);
	else if (x86_fwspec_is_hpet(fwspec))
		iommu = map_hpet_to_iommu(fwspec->param[0]);

	return iommu && d == iommu->ir_domain;
}

static const struct irq_domain_ops intel_ir_domain_ops = {
	.select = intel_irq_remapping_select,
	.alloc = intel_irq_remapping_alloc,
	.free = intel_irq_remapping_free,
	.activate = intel_irq_remapping_activate,
	.deactivate = intel_irq_remapping_deactivate,
};

static const struct msi_parent_ops dmar_msi_parent_ops = {
	.supported_flags	= X86_VECTOR_MSI_FLAGS_SUPPORTED |
				  MSI_FLAG_MULTI_PCI_MSI |
				  MSI_FLAG_PCI_IMS,
	.prefix			= "IR-",
	.init_dev_msi_info	= msi_parent_init_dev_msi_info,
};

static const struct msi_parent_ops virt_dmar_msi_parent_ops = {
	.supported_flags	= X86_VECTOR_MSI_FLAGS_SUPPORTED |
				  MSI_FLAG_MULTI_PCI_MSI,
	.prefix			= "vIR-",
	.init_dev_msi_info	= msi_parent_init_dev_msi_info,
};

/*
 * Support of Interrupt Remapping Unit Hotplug
 */
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
	int ret;
	int eim = x2apic_enabled();

	ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu);
	if (ret)
		return ret;

	if (eim && !ecap_eim_support(iommu->ecap)) {
		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			iommu->reg_phys, iommu->ecap);
		return -ENODEV;
	}

	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
			iommu->reg_phys);
		return -ENODEV;
	}

	/* TODO: check all IOAPICs are covered by IOMMU */

	/* Setup Interrupt-remapping now. */
	ret = intel_setup_irq_remapping(iommu);
	if (ret) {
		pr_err("Failed to setup irq remapping for %s\n",
		       iommu->name);
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	} else {
		iommu_enable_irq_remapping(iommu);
	}

	return ret;
}

int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!irq_remapping_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;
	if (irq_remapping_cap(IRQ_POSTING_CAP) &&
	    !cap_pi_support(iommu->cap))
		return -EBUSY;

	if (insert) {
		if (!iommu->ir_table)
			ret = dmar_ir_add(dmaru, iommu);
	} else {
		if (iommu->ir_table) {
			if (!bitmap_empty(iommu->ir_table->bitmap,
					  INTR_REMAP_TABLE_ENTRIES)) {
				ret = -EBUSY;
			} else {
				iommu_disable_irq_remapping(iommu);
				intel_teardown_irq_remapping(iommu);
				ir_remove_ioapic_hpet_scope(iommu);
			}
		}
	}

	return ret;
}