/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI table.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/dmi.h>

#define PREFIX "DMAR:"
/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time, and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
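/*
 * A minimal sketch (not the driver's code) of why the ordering above
 * matters: a lookup that walks dmar_drhd_units front to back and returns
 * the first match only falls through to an INCLUDE_ALL unit after every
 * unit with an explicit device scope has been tried.  The helper
 * drhd_scope_matches() is hypothetical, standing in for the real
 * device-scope comparison done in dmar_find_matched_drhd_unit() below.
 */
#if 0
static struct dmar_drhd_unit *sketch_find_drhd(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		if (drhd->include_all)
			return drhd;	/* catch-all unit, always at the tail */
		if (drhd_scope_matches(drhd, dev))
			return drhd;	/* explicit device scope wins */
	}
	return NULL;
}
#endif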
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
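/*
 * Note the two-pass shape of dmar_parse_dev_scope() above: the first
 * walk over [start, end) only counts ENDPOINT and BRIDGE entries so the
 * device array can be sized; the second walk resolves each counted
 * entry to a pci_dev via dmar_parse_one_dev_scope().  Advancing by
 * scope->length is what steps over the variable-length entries.
 */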
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been made
	 * with a fixed map.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
	       (((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
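/*
 * A minimal, self-contained sketch (not the driver's code) of the
 * subtable walk used above.  Every DMAR remapping structure starts with
 * an acpi_dmar_header whose length field is the only way to advance to
 * the next entry, so a zero length would loop forever -- hence the
 * guard in parse_dmar_table().
 */
#if 0
static int sketch_walk_subtables(struct acpi_table_dmar *dmar, size_t len,
				 int (*cb)(struct acpi_dmar_header *))
{
	struct acpi_dmar_header *hdr = (struct acpi_dmar_header *)(dmar + 1);
	void *end = (void *)dmar + len;

	while ((void *)hdr < end) {
		if (hdr->length == 0)
			return -EINVAL;		/* bad table, bail out */
		if (cb(hdr))
			return -EINVAL;
		hdr = (void *)hdr + hdr->length;
	}
	return 0;
}
#endif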
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

#ifdef CONFIG_INTR_REMAP
	{
		struct acpi_table_dmar *dmar;
		/*
		 * For now we disable DMA remapping when interrupt
		 * remapping is enabled.  Once support for queued
		 * invalidation of the IOTLB is added, this will no
		 * longer be needed.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
	}
#endif
	if (ret && !no_iommu && !iommu_detected && !swiotlb &&
	    !dmar_disabled)
		iommu_detected = 1;

	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might span more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		 (unsigned long long)drhd->reg_base_addr,
		 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		 (unsigned long long)iommu->cap,
		 (unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	iounmap(iommu->reg);
error:
	kfree(iommu);
	return -1;
}
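/*
 * Worked example for the remap in alloc_iommu() above (illustrative
 * register values, not taken from real hardware): with
 * ecap_max_iotlb_offset() == 0x1008 and cap_max_fault_reg_offset()
 * == 0x220, map_size becomes VTD_PAGE_ALIGN(0x1008) == 0x2000, so the
 * initial 4KiB mapping is replaced by an 8KiB one that also covers the
 * IOTLB invalidation registers.
 */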
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
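/*
 * Queue bookkeeping, illustrated (all indices mod QI_LENGTH; the values
 * below are a hypothetical snapshot, not taken from this file):
 *
 *   ... [QI_FREE] [QI_DONE] [QI_DONE] [QI_IN_USE] [QI_FREE] ...
 *                 ^free_tail                      ^free_head
 *
 * qi_submit_sync() claims two slots at free_head (the descriptor plus
 * its wait descriptor) and decrements free_cnt; reclaim_free_desc()
 * walks free_tail forward over completed slots, marking each QI_FREE
 * and returning it to free_cnt.
 */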
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error.  No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> 4) == index) {
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc = 0;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled, to prevent interrupt
		 * context from queueing another command while one is already
		 * submitted and waiting for completion on this CPU.  This
		 * avoids a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	return rc;
}
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
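/*
 * Usage sketch (illustrative values, not from this file): request a
 * page-selective IOTLB flush of 2^3 = 8 contiguous 4KiB pages of
 * domain 5, starting at IOVA 0x100000, which is aligned to the 32KiB
 * invalidation range as the hardware requires.
 */
#if 0
	qi_flush_iotlb(iommu, 5, 0x100000, 3, DMA_TLB_PSI_FLUSH);
#endif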
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation
	 * requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface.  This is required to support
 * interrupt remapping.  It is also used by DMA remapping, where it
 * replaces register-based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(iommu->qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(iommu->qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
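/*
 * Bring-up sketch (illustrative, error handling elided): enable the
 * invalidation queue, then use it, e.g. to flush the global interrupt
 * entry cache before interrupt remapping is programmed.
 */
#if 0
	if (dmar_enable_qi(iommu))
		return -ENODEV;
	qi_global_iec(iommu);
#endif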
/* iommu interrupt handling.  Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
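/*
 * Worked example of the mapping above: reason code 0x20 selects
 * intr_remap_fault_reasons[0], 0x21 selects [1], and so on, while codes
 * below ARRAY_SIZE(dma_remap_fault_reasons) index that array directly.
 * Anything else is reported as "Unknown".
 */
#if 0
	int type;
	const char *why = dmar_get_fault_reason(0x21, &type);
	/* why == "Interrupt index exceeded the interrupt-remapping table
	   size", type == INTR_REMAP */
#endif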
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits of the fault record */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
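/*
 * Layout of one 128-bit fault recording register as consumed above
 * (PRIMARY_FAULT_REG_LEN == 16 bytes per record):
 *
 *   offset  0: u64 -- faulting page address (dma_frcd_page_addr())
 *   offset  8: u32 -- source-id of the requester in the low 16 bits
 *                     (dma_frcd_source_id())
 *   offset 12: u32 -- F bit (DMA_FRCD_F), fault reason and request
 *                     type (dma_frcd_fault_reason(), dma_frcd_type())
 */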
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again.  Since there are no
	 * pending invalidation requests now, it's safe to re-enable it.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
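/*
 * Resume-path usage sketch (illustrative, error handling elided): after
 * the IOMMU state is restored, the invalidation queue must be brought
 * back up before any queued flushes are issued.
 */
#if 0
	if (iommu->qi)
		dmar_reenable_qi(iommu);
#endif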