x86/ia64: intel-iommu: move to drivers/iommu/
author Ohad Ben-Cohen <ohad@wizery.com>
Fri, 10 Jun 2011 18:42:27 +0000 (21:42 +0300)
committer Joerg Roedel <joerg.roedel@amd.com>
Tue, 21 Jun 2011 08:49:30 +0000 (10:49 +0200)
This should make it easier to spot similarities across different platforms,
with the intention of solving problems once, in a generic framework
that everyone can use.

Note: moving intel-iommu.c requires moving the declaration of
pci_find_upstream_pcie_bridge() from drivers/pci/pci.h to include/linux/pci.h
(see the sketch below). This is handled in this patch, too.
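For reference, the relocated prototype looks roughly like the C declaration
below; the exact storage-class keyword and any surrounding #ifdef CONFIG_PCI
guard in include/linux/pci.h are not shown here and should be checked against
the tree:

    /* Previously declared in drivers/pci/pci.h; moved so code outside
     * drivers/pci/ (e.g. drivers/iommu/intel-iommu.c) can use it. */
    struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);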

As suggested, also drop DMAR's EXPERIMENTAL tag while we're at it.

Compile-tested on x86_64.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
17 files changed:
arch/ia64/Kconfig
arch/x86/Kconfig
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/dmar.c [new file with mode: 0644]
drivers/iommu/intel-iommu.c [new file with mode: 0644]
drivers/iommu/intr_remapping.c [new file with mode: 0644]
drivers/iommu/intr_remapping.h [new file with mode: 0644]
drivers/iommu/iova.c [new file with mode: 0644]
drivers/pci/Makefile
drivers/pci/dmar.c [deleted file]
drivers/pci/intel-iommu.c [deleted file]
drivers/pci/intr_remapping.c [deleted file]
drivers/pci/intr_remapping.h [deleted file]
drivers/pci/iova.c [deleted file]
drivers/pci/pci.h
include/linux/pci.h

index 9929e4e11ea02472685ca899ce5b68790d7e7f6c..7336ba653b8fc6fabbdb51cda369926bbf1915ca 100644 (file)
@@ -627,27 +627,6 @@ source "drivers/pci/hotplug/Kconfig"
 
 source "drivers/pcmcia/Kconfig"
 
-config DMAR
-        bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-        depends on IA64_GENERIC && ACPI && EXPERIMENTAL
-       help
-         DMA remapping (DMAR) devices support enables independent address
-         translations for Direct Memory Access (DMA) from devices.
-         These DMA remapping devices are reported via ACPI tables
-         and include PCI device scope covered by these DMA
-         remapping devices.
-
-config DMAR_DEFAULT_ON
-       def_bool y
-       prompt "Enable DMA Remapping Devices by default"
-       depends on DMAR
-       help
-         Selecting this option will enable a DMAR device at boot time if
-         one is found. If this option is not selected, DMAR support can
-         be enabled by passing intel_iommu=on to the kernel. It is
-         recommended you say N here while the DMAR code remains
-         experimental.
-
 endmenu
 
 endif
index 1b6a2e212db40b0ae6cf368a952c8b7ab5933057..a169573c64fc701f27f1a44f579e95e80cf3db99 100644 (file)
@@ -1912,56 +1912,6 @@ config PCI_CNB20LE_QUIRK
 
          You should say N unless you know you need this.
 
-config DMAR
-       bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-       depends on PCI_MSI && ACPI && EXPERIMENTAL
-       select IOMMU_API
-       help
-         DMA remapping (DMAR) devices support enables independent address
-         translations for Direct Memory Access (DMA) from devices.
-         These DMA remapping devices are reported via ACPI tables
-         and include PCI device scope covered by these DMA
-         remapping devices.
-
-config DMAR_DEFAULT_ON
-       def_bool y
-       prompt "Enable DMA Remapping Devices by default"
-       depends on DMAR
-       help
-         Selecting this option will enable a DMAR device at boot time if
-         one is found. If this option is not selected, DMAR support can
-         be enabled by passing intel_iommu=on to the kernel. It is
-         recommended you say N here while the DMAR code remains
-         experimental.
-
-config DMAR_BROKEN_GFX_WA
-       bool "Workaround broken graphics drivers (going away soon)"
-       depends on DMAR && BROKEN
-       ---help---
-         Current Graphics drivers tend to use physical address
-         for DMA and avoid using DMA APIs. Setting this config
-         option permits the IOMMU driver to set a unity map for
-         all the OS-visible memory. Hence the driver can continue
-         to use physical addresses for DMA, at least until this
-         option is removed in the 2.6.32 kernel.
-
-config DMAR_FLOPPY_WA
-       def_bool y
-       depends on DMAR
-       ---help---
-         Floppy disk drivers are known to bypass DMA API calls
-         thereby failing to work when IOMMU is enabled. This
-         workaround will setup a 1:1 mapping for the first
-         16MiB to make floppy (an ISA device) work.
-
-config INTR_REMAP
-       bool "Support for Interrupt Remapping (EXPERIMENTAL)"
-       depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
-       ---help---
-         Supports Interrupt remapping for IO-APIC and MSI devices.
-         To use x2apic mode in the CPU's which support x2APIC enhancements or
-         to support platforms with CPU's having > 8 bit APIC ID, say Y.
-
 source "drivers/pci/pcie/Kconfig"
 
 source "drivers/pci/Kconfig"
index 9246c5bf25afd5e9a1de65bcea476820b0db7868..e2a5f141ae2d18088be68d37b261c1cce2b54017 100644 (file)
@@ -46,3 +46,52 @@ config AMD_IOMMU_STATS
          statistics about whats happening in the driver and exports that
          information to userspace via debugfs.
          If unsure, say N.
+
+# Intel IOMMU support
+config DMAR
+       bool "Support for DMA Remapping Devices"
+       depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
+       select IOMMU_API
+       help
+         DMA remapping (DMAR) devices support enables independent address
+         translations for Direct Memory Access (DMA) from devices.
+         These DMA remapping devices are reported via ACPI tables
+         and include PCI device scope covered by these DMA
+         remapping devices.
+
+config DMAR_DEFAULT_ON
+       def_bool y
+       prompt "Enable DMA Remapping Devices by default"
+       depends on DMAR
+       help
+         Selecting this option will enable a DMAR device at boot time if
+         one is found. If this option is not selected, DMAR support can
+         be enabled by passing intel_iommu=on to the kernel.
+
+config DMAR_BROKEN_GFX_WA
+       bool "Workaround broken graphics drivers (going away soon)"
+       depends on DMAR && BROKEN && X86
+       ---help---
+         Current Graphics drivers tend to use physical address
+         for DMA and avoid using DMA APIs. Setting this config
+         option permits the IOMMU driver to set a unity map for
+         all the OS-visible memory. Hence the driver can continue
+         to use physical addresses for DMA, at least until this
+         option is removed in the 2.6.32 kernel.
+
+config DMAR_FLOPPY_WA
+       def_bool y
+       depends on DMAR && X86
+       ---help---
+         Floppy disk drivers are known to bypass DMA API calls
+         thereby failing to work when IOMMU is enabled. This
+         workaround will setup a 1:1 mapping for the first
+         16MiB to make floppy (an ISA device) work.
+
+config INTR_REMAP
+       bool "Support for Interrupt Remapping (EXPERIMENTAL)"
+       depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
+       ---help---
+         Supports Interrupt remapping for IO-APIC and MSI devices.
+         To use x2apic mode in the CPU's which support x2APIC enhancements or
+         to support platforms with CPU's having > 8 bit APIC ID, say Y.
index 4237eaf8460984f78cd29886a13c60ca8ef039ee..49e9c0f46bd5df551784a04d657c860329f5e543 100644 (file)
@@ -1,3 +1,5 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o
+obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
+obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
new file mode 100644 (file)
index 0000000..3dc9bef
--- /dev/null
@@ -0,0 +1,1461 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Author: Ashok Raj <ashok.raj@intel.com>
+ * Author: Shaohua Li <shaohua.li@intel.com>
+ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ *
+ * This file implements early detection/parsing of Remapping Devices
+ * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
+ * tables.
+ *
+ * These routines are used by both DMA-remapping and Interrupt-remapping
+ */
+
+#include <linux/pci.h>
+#include <linux/dmar.h>
+#include <linux/iova.h>
+#include <linux/intel-iommu.h>
+#include <linux/timer.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/tboot.h>
+#include <linux/dmi.h>
+#include <linux/slab.h>
+#include <asm/iommu_table.h>
+
+#define PREFIX "DMAR: "
+
+/* No locks are needed as DMA remapping hardware unit
+ * list is constructed at boot time and hotplug of
+ * these units are not supported by the architecture.
+ */
+LIST_HEAD(dmar_drhd_units);
+
+static struct acpi_table_header * __initdata dmar_tbl;
+static acpi_size dmar_tbl_size;
+
+static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
+{
+       /*
+        * add INCLUDE_ALL at the tail, so scan the list will find it at
+        * the very end.
+        */
+       if (drhd->include_all)
+               list_add_tail(&drhd->list, &dmar_drhd_units);
+       else
+               list_add(&drhd->list, &dmar_drhd_units);
+}
+
+static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
+                                          struct pci_dev **dev, u16 segment)
+{
+       struct pci_bus *bus;
+       struct pci_dev *pdev = NULL;
+       struct acpi_dmar_pci_path *path;
+       int count;
+
+       bus = pci_find_bus(segment, scope->bus);
+       path = (struct acpi_dmar_pci_path *)(scope + 1);
+       count = (scope->length - sizeof(struct acpi_dmar_device_scope))
+               / sizeof(struct acpi_dmar_pci_path);
+
+       while (count) {
+               if (pdev)
+                       pci_dev_put(pdev);
+               /*
+                * Some BIOSes list non-exist devices in DMAR table, just
+                * ignore it
+                */
+               if (!bus) {
+                       printk(KERN_WARNING
+                       PREFIX "Device scope bus [%d] not found\n",
+                       scope->bus);
+                       break;
+               }
+               pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
+               if (!pdev) {
+                       printk(KERN_WARNING PREFIX
+                       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
+                               segment, bus->number, path->dev, path->fn);
+                       break;
+               }
+               path ++;
+               count --;
+               bus = pdev->subordinate;
+       }
+       if (!pdev) {
+               printk(KERN_WARNING PREFIX
+               "Device scope device [%04x:%02x:%02x.%02x] not found\n",
+               segment, scope->bus, path->dev, path->fn);
+               *dev = NULL;
+               return 0;
+       }
+       if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
+                       pdev->subordinate) || (scope->entry_type == \
+                       ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
+               pci_dev_put(pdev);
+               printk(KERN_WARNING PREFIX
+                       "Device scope type does not match for %s\n",
+                        pci_name(pdev));
+               return -EINVAL;
+       }
+       *dev = pdev;
+       return 0;
+}
+
+static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
+                                      struct pci_dev ***devices, u16 segment)
+{
+       struct acpi_dmar_device_scope *scope;
+       void * tmp = start;
+       int index;
+       int ret;
+
+       *cnt = 0;
+       while (start < end) {
+               scope = start;
+               if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
+                   scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
+                       (*cnt)++;
+               else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
+                       printk(KERN_WARNING PREFIX
+                              "Unsupported device scope\n");
+               }
+               start += scope->length;
+       }
+       if (*cnt == 0)
+               return 0;
+
+       *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
+       if (!*devices)
+               return -ENOMEM;
+
+       start = tmp;
+       index = 0;
+       while (start < end) {
+               scope = start;
+               if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
+                   scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
+                       ret = dmar_parse_one_dev_scope(scope,
+                               &(*devices)[index], segment);
+                       if (ret) {
+                               kfree(*devices);
+                               return ret;
+                       }
+                       index ++;
+               }
+               start += scope->length;
+       }
+
+       return 0;
+}
+
+/**
+ * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
+ * structure which uniquely represent one DMA remapping hardware unit
+ * present in the platform
+ */
+static int __init
+dmar_parse_one_drhd(struct acpi_dmar_header *header)
+{
+       struct acpi_dmar_hardware_unit *drhd;
+       struct dmar_drhd_unit *dmaru;
+       int ret = 0;
+
+       drhd = (struct acpi_dmar_hardware_unit *)header;
+       dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
+       if (!dmaru)
+               return -ENOMEM;
+
+       dmaru->hdr = header;
+       dmaru->reg_base_addr = drhd->address;
+       dmaru->segment = drhd->segment;
+       dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
+
+       ret = alloc_iommu(dmaru);
+       if (ret) {
+               kfree(dmaru);
+               return ret;
+       }
+       dmar_register_drhd_unit(dmaru);
+       return 0;
+}
+
+static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+{
+       struct acpi_dmar_hardware_unit *drhd;
+       int ret = 0;
+
+       drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
+
+       if (dmaru->include_all)
+               return 0;
+
+       ret = dmar_parse_dev_scope((void *)(drhd + 1),
+                               ((void *)drhd) + drhd->header.length,
+                               &dmaru->devices_cnt, &dmaru->devices,
+                               drhd->segment);
+       if (ret) {
+               list_del(&dmaru->list);
+               kfree(dmaru);
+       }
+       return ret;
+}
+
+#ifdef CONFIG_DMAR
+LIST_HEAD(dmar_rmrr_units);
+
+static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
+{
+       list_add(&rmrr->list, &dmar_rmrr_units);
+}
+
+
+static int __init
+dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+{
+       struct acpi_dmar_reserved_memory *rmrr;
+       struct dmar_rmrr_unit *rmrru;
+
+       rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
+       if (!rmrru)
+               return -ENOMEM;
+
+       rmrru->hdr = header;
+       rmrr = (struct acpi_dmar_reserved_memory *)header;
+       rmrru->base_address = rmrr->base_address;
+       rmrru->end_address = rmrr->end_address;
+
+       dmar_register_rmrr_unit(rmrru);
+       return 0;
+}
+
+static int __init
+rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
+{
+       struct acpi_dmar_reserved_memory *rmrr;
+       int ret;
+
+       rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
+       ret = dmar_parse_dev_scope((void *)(rmrr + 1),
+               ((void *)rmrr) + rmrr->header.length,
+               &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
+
+       if (ret || (rmrru->devices_cnt == 0)) {
+               list_del(&rmrru->list);
+               kfree(rmrru);
+       }
+       return ret;
+}
+
+static LIST_HEAD(dmar_atsr_units);
+
+static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+{
+       struct acpi_dmar_atsr *atsr;
+       struct dmar_atsr_unit *atsru;
+
+       atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+       atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+       if (!atsru)
+               return -ENOMEM;
+
+       atsru->hdr = hdr;
+       atsru->include_all = atsr->flags & 0x1;
+
+       list_add(&atsru->list, &dmar_atsr_units);
+
+       return 0;
+}
+
+static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
+{
+       int rc;
+       struct acpi_dmar_atsr *atsr;
+
+       if (atsru->include_all)
+               return 0;
+
+       atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+       rc = dmar_parse_dev_scope((void *)(atsr + 1),
+                               (void *)atsr + atsr->header.length,
+                               &atsru->devices_cnt, &atsru->devices,
+                               atsr->segment);
+       if (rc || !atsru->devices_cnt) {
+               list_del(&atsru->list);
+               kfree(atsru);
+       }
+
+       return rc;
+}
+
+int dmar_find_matched_atsr_unit(struct pci_dev *dev)
+{
+       int i;
+       struct pci_bus *bus;
+       struct acpi_dmar_atsr *atsr;
+       struct dmar_atsr_unit *atsru;
+
+       dev = pci_physfn(dev);
+
+       list_for_each_entry(atsru, &dmar_atsr_units, list) {
+               atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+               if (atsr->segment == pci_domain_nr(dev->bus))
+                       goto found;
+       }
+
+       return 0;
+
+found:
+       for (bus = dev->bus; bus; bus = bus->parent) {
+               struct pci_dev *bridge = bus->self;
+
+               if (!bridge || !pci_is_pcie(bridge) ||
+                   bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+                       return 0;
+
+               if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
+                       for (i = 0; i < atsru->devices_cnt; i++)
+                               if (atsru->devices[i] == bridge)
+                                       return 1;
+                       break;
+               }
+       }
+
+       if (atsru->include_all)
+               return 1;
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_ACPI_NUMA
+static int __init
+dmar_parse_one_rhsa(struct acpi_dmar_header *header)
+{
+       struct acpi_dmar_rhsa *rhsa;
+       struct dmar_drhd_unit *drhd;
+
+       rhsa = (struct acpi_dmar_rhsa *)header;
+       for_each_drhd_unit(drhd) {
+               if (drhd->reg_base_addr == rhsa->base_address) {
+                       int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
+
+                       if (!node_online(node))
+                               node = -1;
+                       drhd->iommu->node = node;
+                       return 0;
+               }
+       }
+       WARN_TAINT(
+               1, TAINT_FIRMWARE_WORKAROUND,
+               "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
+               "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+               drhd->reg_base_addr,
+               dmi_get_system_info(DMI_BIOS_VENDOR),
+               dmi_get_system_info(DMI_BIOS_VERSION),
+               dmi_get_system_info(DMI_PRODUCT_VERSION));
+
+       return 0;
+}
+#endif
+
+static void __init
+dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
+{
+       struct acpi_dmar_hardware_unit *drhd;
+       struct acpi_dmar_reserved_memory *rmrr;
+       struct acpi_dmar_atsr *atsr;
+       struct acpi_dmar_rhsa *rhsa;
+
+       switch (header->type) {
+       case ACPI_DMAR_TYPE_HARDWARE_UNIT:
+               drhd = container_of(header, struct acpi_dmar_hardware_unit,
+                                   header);
+               printk (KERN_INFO PREFIX
+                       "DRHD base: %#016Lx flags: %#x\n",
+                       (unsigned long long)drhd->address, drhd->flags);
+               break;
+       case ACPI_DMAR_TYPE_RESERVED_MEMORY:
+               rmrr = container_of(header, struct acpi_dmar_reserved_memory,
+                                   header);
+               printk (KERN_INFO PREFIX
+                       "RMRR base: %#016Lx end: %#016Lx\n",
+                       (unsigned long long)rmrr->base_address,
+                       (unsigned long long)rmrr->end_address);
+               break;
+       case ACPI_DMAR_TYPE_ATSR:
+               atsr = container_of(header, struct acpi_dmar_atsr, header);
+               printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+               break;
+       case ACPI_DMAR_HARDWARE_AFFINITY:
+               rhsa = container_of(header, struct acpi_dmar_rhsa, header);
+               printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
+                      (unsigned long long)rhsa->base_address,
+                      rhsa->proximity_domain);
+               break;
+       }
+}
+
+/**
+ * dmar_table_detect - checks to see if the platform supports DMAR devices
+ */
+static int __init dmar_table_detect(void)
+{
+       acpi_status status = AE_OK;
+
+       /* if we could find DMAR table, then there are DMAR devices */
+       status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
+                               (struct acpi_table_header **)&dmar_tbl,
+                               &dmar_tbl_size);
+
+       if (ACPI_SUCCESS(status) && !dmar_tbl) {
+               printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+               status = AE_NOT_FOUND;
+       }
+
+       return (ACPI_SUCCESS(status) ? 1 : 0);
+}
+
+/**
+ * parse_dmar_table - parses the DMA reporting table
+ */
+static int __init
+parse_dmar_table(void)
+{
+       struct acpi_table_dmar *dmar;
+       struct acpi_dmar_header *entry_header;
+       int ret = 0;
+
+       /*
+        * Do it again, earlier dmar_tbl mapping could be mapped with
+        * fixed map.
+        */
+       dmar_table_detect();
+
+       /*
+        * ACPI tables may not be DMA protected by tboot, so use DMAR copy
+        * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
+        */
+       dmar_tbl = tboot_get_dmar_table(dmar_tbl);
+
+       dmar = (struct acpi_table_dmar *)dmar_tbl;
+       if (!dmar)
+               return -ENODEV;
+
+       if (dmar->width < PAGE_SHIFT - 1) {
+               printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
+               return -EINVAL;
+       }
+
+       printk (KERN_INFO PREFIX "Host address width %d\n",
+               dmar->width + 1);
+
+       entry_header = (struct acpi_dmar_header *)(dmar + 1);
+       while (((unsigned long)entry_header) <
+                       (((unsigned long)dmar) + dmar_tbl->length)) {
+               /* Avoid looping forever on bad ACPI tables */
+               if (entry_header->length == 0) {
+                       printk(KERN_WARNING PREFIX
+                               "Invalid 0-length structure\n");
+                       ret = -EINVAL;
+                       break;
+               }
+
+               dmar_table_print_dmar_entry(entry_header);
+
+               switch (entry_header->type) {
+               case ACPI_DMAR_TYPE_HARDWARE_UNIT:
+                       ret = dmar_parse_one_drhd(entry_header);
+                       break;
+               case ACPI_DMAR_TYPE_RESERVED_MEMORY:
+#ifdef CONFIG_DMAR
+                       ret = dmar_parse_one_rmrr(entry_header);
+#endif
+                       break;
+               case ACPI_DMAR_TYPE_ATSR:
+#ifdef CONFIG_DMAR
+                       ret = dmar_parse_one_atsr(entry_header);
+#endif
+                       break;
+               case ACPI_DMAR_HARDWARE_AFFINITY:
+#ifdef CONFIG_ACPI_NUMA
+                       ret = dmar_parse_one_rhsa(entry_header);
+#endif
+                       break;
+               default:
+                       printk(KERN_WARNING PREFIX
+                               "Unknown DMAR structure type %d\n",
+                               entry_header->type);
+                       ret = 0; /* for forward compatibility */
+                       break;
+               }
+               if (ret)
+                       break;
+
+               entry_header = ((void *)entry_header + entry_header->length);
+       }
+       return ret;
+}
+
+static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
+                         struct pci_dev *dev)
+{
+       int index;
+
+       while (dev) {
+               for (index = 0; index < cnt; index++)
+                       if (dev == devices[index])
+                               return 1;
+
+               /* Check our parent */
+               dev = dev->bus->self;
+       }
+
+       return 0;
+}
+
+struct dmar_drhd_unit *
+dmar_find_matched_drhd_unit(struct pci_dev *dev)
+{
+       struct dmar_drhd_unit *dmaru = NULL;
+       struct acpi_dmar_hardware_unit *drhd;
+
+       dev = pci_physfn(dev);
+
+       list_for_each_entry(dmaru, &dmar_drhd_units, list) {
+               drhd = container_of(dmaru->hdr,
+                                   struct acpi_dmar_hardware_unit,
+                                   header);
+
+               if (dmaru->include_all &&
+                   drhd->segment == pci_domain_nr(dev->bus))
+                       return dmaru;
+
+               if (dmar_pci_device_match(dmaru->devices,
+                                         dmaru->devices_cnt, dev))
+                       return dmaru;
+       }
+
+       return NULL;
+}
+
+int __init dmar_dev_scope_init(void)
+{
+       struct dmar_drhd_unit *drhd, *drhd_n;
+       int ret = -ENODEV;
+
+       list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
+               ret = dmar_parse_dev(drhd);
+               if (ret)
+                       return ret;
+       }
+
+#ifdef CONFIG_DMAR
+       {
+               struct dmar_rmrr_unit *rmrr, *rmrr_n;
+               struct dmar_atsr_unit *atsr, *atsr_n;
+
+               list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
+                       ret = rmrr_parse_dev(rmrr);
+                       if (ret)
+                               return ret;
+               }
+
+               list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+                       ret = atsr_parse_dev(atsr);
+                       if (ret)
+                               return ret;
+               }
+       }
+#endif
+
+       return ret;
+}
+
+
+int __init dmar_table_init(void)
+{
+       static int dmar_table_initialized;
+       int ret;
+
+       if (dmar_table_initialized)
+               return 0;
+
+       dmar_table_initialized = 1;
+
+       ret = parse_dmar_table();
+       if (ret) {
+               if (ret != -ENODEV)
+                       printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
+               return ret;
+       }
+
+       if (list_empty(&dmar_drhd_units)) {
+               printk(KERN_INFO PREFIX "No DMAR devices found\n");
+               return -ENODEV;
+       }
+
+#ifdef CONFIG_DMAR
+       if (list_empty(&dmar_rmrr_units))
+               printk(KERN_INFO PREFIX "No RMRR found\n");
+
+       if (list_empty(&dmar_atsr_units))
+               printk(KERN_INFO PREFIX "No ATSR found\n");
+#endif
+
+       return 0;
+}
+
+static void warn_invalid_dmar(u64 addr, const char *message)
+{
+       WARN_TAINT_ONCE(
+               1, TAINT_FIRMWARE_WORKAROUND,
+               "Your BIOS is broken; DMAR reported at address %llx%s!\n"
+               "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+               addr, message,
+               dmi_get_system_info(DMI_BIOS_VENDOR),
+               dmi_get_system_info(DMI_BIOS_VERSION),
+               dmi_get_system_info(DMI_PRODUCT_VERSION));
+}
+
+int __init check_zero_address(void)
+{
+       struct acpi_table_dmar *dmar;
+       struct acpi_dmar_header *entry_header;
+       struct acpi_dmar_hardware_unit *drhd;
+
+       dmar = (struct acpi_table_dmar *)dmar_tbl;
+       entry_header = (struct acpi_dmar_header *)(dmar + 1);
+
+       while (((unsigned long)entry_header) <
+                       (((unsigned long)dmar) + dmar_tbl->length)) {
+               /* Avoid looping forever on bad ACPI tables */
+               if (entry_header->length == 0) {
+                       printk(KERN_WARNING PREFIX
+                               "Invalid 0-length structure\n");
+                       return 0;
+               }
+
+               if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
+                       void __iomem *addr;
+                       u64 cap, ecap;
+
+                       drhd = (void *)entry_header;
+                       if (!drhd->address) {
+                               warn_invalid_dmar(0, "");
+                               goto failed;
+                       }
+
+                       addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
+                       if (!addr ) {
+                               printk("IOMMU: can't validate: %llx\n", drhd->address);
+                               goto failed;
+                       }
+                       cap = dmar_readq(addr + DMAR_CAP_REG);
+                       ecap = dmar_readq(addr + DMAR_ECAP_REG);
+                       early_iounmap(addr, VTD_PAGE_SIZE);
+                       if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
+                               warn_invalid_dmar(drhd->address,
+                                                 " returns all ones");
+                               goto failed;
+                       }
+               }
+
+               entry_header = ((void *)entry_header + entry_header->length);
+       }
+       return 1;
+
+failed:
+#ifdef CONFIG_DMAR
+       dmar_disabled = 1;
+#endif
+       return 0;
+}
+
+int __init detect_intel_iommu(void)
+{
+       int ret;
+
+       ret = dmar_table_detect();
+       if (ret)
+               ret = check_zero_address();
+       {
+#ifdef CONFIG_INTR_REMAP
+               struct acpi_table_dmar *dmar;
+
+               dmar = (struct acpi_table_dmar *) dmar_tbl;
+               if (ret && cpu_has_x2apic && dmar->flags & 0x1)
+                       printk(KERN_INFO
+                              "Queued invalidation will be enabled to support "
+                              "x2apic and Intr-remapping.\n");
+#endif
+#ifdef CONFIG_DMAR
+               if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+                       iommu_detected = 1;
+                       /* Make sure ACS will be enabled */
+                       pci_request_acs();
+               }
+#endif
+#ifdef CONFIG_X86
+               if (ret)
+                       x86_init.iommu.iommu_init = intel_iommu_init;
+#endif
+       }
+       early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
+       dmar_tbl = NULL;
+
+       return ret ? 1 : -ENODEV;
+}
+
+
+int alloc_iommu(struct dmar_drhd_unit *drhd)
+{
+       struct intel_iommu *iommu;
+       int map_size;
+       u32 ver;
+       static int iommu_allocated = 0;
+       int agaw = 0;
+       int msagaw = 0;
+
+       if (!drhd->reg_base_addr) {
+               warn_invalid_dmar(0, "");
+               return -EINVAL;
+       }
+
+       iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+       if (!iommu)
+               return -ENOMEM;
+
+       iommu->seq_id = iommu_allocated++;
+       sprintf (iommu->name, "dmar%d", iommu->seq_id);
+
+       iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
+       if (!iommu->reg) {
+               printk(KERN_ERR "IOMMU: can't map the region\n");
+               goto error;
+       }
+       iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+       iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+       if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+               warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
+               goto err_unmap;
+       }
+
+#ifdef CONFIG_DMAR
+       agaw = iommu_calculate_agaw(iommu);
+       if (agaw < 0) {
+               printk(KERN_ERR
+                      "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+                      iommu->seq_id);
+               goto err_unmap;
+       }
+       msagaw = iommu_calculate_max_sagaw(iommu);
+       if (msagaw < 0) {
+               printk(KERN_ERR
+                       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
+                       iommu->seq_id);
+               goto err_unmap;
+       }
+#endif
+       iommu->agaw = agaw;
+       iommu->msagaw = msagaw;
+
+       iommu->node = -1;
+
+       /* the registers might be more than one page */
+       map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+               cap_max_fault_reg_offset(iommu->cap));
+       map_size = VTD_PAGE_ALIGN(map_size);
+       if (map_size > VTD_PAGE_SIZE) {
+               iounmap(iommu->reg);
+               iommu->reg = ioremap(drhd->reg_base_addr, map_size);
+               if (!iommu->reg) {
+                       printk(KERN_ERR "IOMMU: can't map the region\n");
+                       goto error;
+               }
+       }
+
+       ver = readl(iommu->reg + DMAR_VER_REG);
+       pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
+               iommu->seq_id,
+               (unsigned long long)drhd->reg_base_addr,
+               DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+               (unsigned long long)iommu->cap,
+               (unsigned long long)iommu->ecap);
+
+       spin_lock_init(&iommu->register_lock);
+
+       drhd->iommu = iommu;
+       return 0;
+
+ err_unmap:
+       iounmap(iommu->reg);
+ error:
+       kfree(iommu);
+       return -1;
+}
+
+void free_iommu(struct intel_iommu *iommu)
+{
+       if (!iommu)
+               return;
+
+#ifdef CONFIG_DMAR
+       free_dmar_iommu(iommu);
+#endif
+
+       if (iommu->reg)
+               iounmap(iommu->reg);
+       kfree(iommu);
+}
+
+/*
+ * Reclaim all the submitted descriptors which have completed its work.
+ */
+static inline void reclaim_free_desc(struct q_inval *qi)
+{
+       while (qi->desc_status[qi->free_tail] == QI_DONE ||
+              qi->desc_status[qi->free_tail] == QI_ABORT) {
+               qi->desc_status[qi->free_tail] = QI_FREE;
+               qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
+               qi->free_cnt++;
+       }
+}
+
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+       u32 fault;
+       int head, tail;
+       struct q_inval *qi = iommu->qi;
+       int wait_index = (index + 1) % QI_LENGTH;
+
+       if (qi->desc_status[wait_index] == QI_ABORT)
+               return -EAGAIN;
+
+       fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+       /*
+        * If IQE happens, the head points to the descriptor associated
+        * with the error. No new descriptors are fetched until the IQE
+        * is cleared.
+        */
+       if (fault & DMA_FSTS_IQE) {
+               head = readl(iommu->reg + DMAR_IQH_REG);
+               if ((head >> DMAR_IQ_SHIFT) == index) {
+                       printk(KERN_ERR "VT-d detected invalid descriptor: "
+                               "low=%llx, high=%llx\n",
+                               (unsigned long long)qi->desc[index].low,
+                               (unsigned long long)qi->desc[index].high);
+                       memcpy(&qi->desc[index], &qi->desc[wait_index],
+                                       sizeof(struct qi_desc));
+                       __iommu_flush_cache(iommu, &qi->desc[index],
+                                       sizeof(struct qi_desc));
+                       writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+                       return -EINVAL;
+               }
+       }
+
+       /*
+        * If ITE happens, all pending wait_desc commands are aborted.
+        * No new descriptors are fetched until the ITE is cleared.
+        */
+       if (fault & DMA_FSTS_ITE) {
+               head = readl(iommu->reg + DMAR_IQH_REG);
+               head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+               head |= 1;
+               tail = readl(iommu->reg + DMAR_IQT_REG);
+               tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+
+               writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+
+               do {
+                       if (qi->desc_status[head] == QI_IN_USE)
+                               qi->desc_status[head] = QI_ABORT;
+                       head = (head - 2 + QI_LENGTH) % QI_LENGTH;
+               } while (head != tail);
+
+               if (qi->desc_status[wait_index] == QI_ABORT)
+                       return -EAGAIN;
+       }
+
+       if (fault & DMA_FSTS_ICE)
+               writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+
+       return 0;
+}
+
+/*
+ * Submit the queued invalidation descriptor to the remapping
+ * hardware unit and wait for its completion.
+ */
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+{
+       int rc;
+       struct q_inval *qi = iommu->qi;
+       struct qi_desc *hw, wait_desc;
+       int wait_index, index;
+       unsigned long flags;
+
+       if (!qi)
+               return 0;
+
+       hw = qi->desc;
+
+restart:
+       rc = 0;
+
+       spin_lock_irqsave(&qi->q_lock, flags);
+       while (qi->free_cnt < 3) {
+               spin_unlock_irqrestore(&qi->q_lock, flags);
+               cpu_relax();
+               spin_lock_irqsave(&qi->q_lock, flags);
+       }
+
+       index = qi->free_head;
+       wait_index = (index + 1) % QI_LENGTH;
+
+       qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
+
+       hw[index] = *desc;
+
+       wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+                       QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+       wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
+
+       hw[wait_index] = wait_desc;
+
+       __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
+       __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
+
+       qi->free_head = (qi->free_head + 2) % QI_LENGTH;
+       qi->free_cnt -= 2;
+
+       /*
+        * update the HW tail register indicating the presence of
+        * new descriptors.
+        */
+       writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
+
+       while (qi->desc_status[wait_index] != QI_DONE) {
+               /*
+                * We will leave the interrupts disabled, to prevent interrupt
+                * context to queue another cmd while a cmd is already submitted
+                * and waiting for completion on this cpu. This is to avoid
+                * a deadlock where the interrupt context can wait indefinitely
+                * for free slots in the queue.
+                */
+               rc = qi_check_fault(iommu, index);
+               if (rc)
+                       break;
+
+               spin_unlock(&qi->q_lock);
+               cpu_relax();
+               spin_lock(&qi->q_lock);
+       }
+
+       qi->desc_status[index] = QI_DONE;
+
+       reclaim_free_desc(qi);
+       spin_unlock_irqrestore(&qi->q_lock, flags);
+
+       if (rc == -EAGAIN)
+               goto restart;
+
+       return rc;
+}
+
+/*
+ * Flush the global interrupt entry cache.
+ */
+void qi_global_iec(struct intel_iommu *iommu)
+{
+       struct qi_desc desc;
+
+       desc.low = QI_IEC_TYPE;
+       desc.high = 0;
+
+       /* should never fail */
+       qi_submit_sync(&desc, iommu);
+}
+
+void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+                     u64 type)
+{
+       struct qi_desc desc;
+
+       desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
+                       | QI_CC_GRAN(type) | QI_CC_TYPE;
+       desc.high = 0;
+
+       qi_submit_sync(&desc, iommu);
+}
+
+void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+                   unsigned int size_order, u64 type)
+{
+       u8 dw = 0, dr = 0;
+
+       struct qi_desc desc;
+       int ih = 0;
+
+       if (cap_write_drain(iommu->cap))
+               dw = 1;
+
+       if (cap_read_drain(iommu->cap))
+               dr = 1;
+
+       desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+               | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
+       desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+               | QI_IOTLB_AM(size_order);
+
+       qi_submit_sync(&desc, iommu);
+}
+
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+                       u64 addr, unsigned mask)
+{
+       struct qi_desc desc;
+
+       if (mask) {
+               BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+               addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+               desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+       } else
+               desc.high = QI_DEV_IOTLB_ADDR(addr);
+
+       if (qdep >= QI_DEV_IOTLB_MAX_INVS)
+               qdep = 0;
+
+       desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+                  QI_DIOTLB_TYPE;
+
+       qi_submit_sync(&desc, iommu);
+}
+
+/*
+ * Disable Queued Invalidation interface.
+ */
+void dmar_disable_qi(struct intel_iommu *iommu)
+{
+       unsigned long flags;
+       u32 sts;
+       cycles_t start_time = get_cycles();
+
+       if (!ecap_qis(iommu->ecap))
+               return;
+
+       spin_lock_irqsave(&iommu->register_lock, flags);
+
+       sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
+       if (!(sts & DMA_GSTS_QIES))
+               goto end;
+
+       /*
+        * Give a chance to HW to complete the pending invalidation requests.
+        */
+       while ((readl(iommu->reg + DMAR_IQT_REG) !=
+               readl(iommu->reg + DMAR_IQH_REG)) &&
+               (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
+               cpu_relax();
+
+       iommu->gcmd &= ~DMA_GCMD_QIE;
+       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
+                     !(sts & DMA_GSTS_QIES), sts);
+end:
+       spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
+ * Enable queued invalidation.
+ */
+static void __dmar_enable_qi(struct intel_iommu *iommu)
+{
+       u32 sts;
+       unsigned long flags;
+       struct q_inval *qi = iommu->qi;
+
+       qi->free_head = qi->free_tail = 0;
+       qi->free_cnt = QI_LENGTH;
+
+       spin_lock_irqsave(&iommu->register_lock, flags);
+
+       /* write zero to the tail reg */
+       writel(0, iommu->reg + DMAR_IQT_REG);
+
+       dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+       iommu->gcmd |= DMA_GCMD_QIE;
+       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+       /* Make sure hardware complete it */
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
+ * Enable Queued Invalidation interface. This is a must to support
+ * interrupt-remapping. Also used by DMA-remapping, which replaces
+ * register based IOTLB invalidation.
+ */
+int dmar_enable_qi(struct intel_iommu *iommu)
+{
+       struct q_inval *qi;
+       struct page *desc_page;
+
+       if (!ecap_qis(iommu->ecap))
+               return -ENOENT;
+
+       /*
+        * queued invalidation is already setup and enabled.
+        */
+       if (iommu->qi)
+               return 0;
+
+       iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
+       if (!iommu->qi)
+               return -ENOMEM;
+
+       qi = iommu->qi;
+
+
+       desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
+       if (!desc_page) {
+               kfree(qi);
+               iommu->qi = 0;
+               return -ENOMEM;
+       }
+
+       qi->desc = page_address(desc_page);
+
+       qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
+       if (!qi->desc_status) {
+               free_page((unsigned long) qi->desc);
+               kfree(qi);
+               iommu->qi = 0;
+               return -ENOMEM;
+       }
+
+       qi->free_head = qi->free_tail = 0;
+       qi->free_cnt = QI_LENGTH;
+
+       spin_lock_init(&qi->q_lock);
+
+       __dmar_enable_qi(iommu);
+
+       return 0;
+}
+
+/* iommu interrupt handling. Most stuff are MSI-like. */
+
+enum faulttype {
+       DMA_REMAP,
+       INTR_REMAP,
+       UNKNOWN,
+};
+
+static const char *dma_remap_fault_reasons[] =
+{
+       "Software",
+       "Present bit in root entry is clear",
+       "Present bit in context entry is clear",
+       "Invalid context entry",
+       "Access beyond MGAW",
+       "PTE Write access is not set",
+       "PTE Read access is not set",
+       "Next page table ptr is invalid",
+       "Root table address invalid",
+       "Context table ptr is invalid",
+       "non-zero reserved fields in RTP",
+       "non-zero reserved fields in CTP",
+       "non-zero reserved fields in PTE",
+};
+
+static const char *intr_remap_fault_reasons[] =
+{
+       "Detected reserved fields in the decoded interrupt-remapped request",
+       "Interrupt index exceeded the interrupt-remapping table size",
+       "Present field in the IRTE entry is clear",
+       "Error accessing interrupt-remapping table pointed by IRTA_REG",
+       "Detected reserved fields in the IRTE entry",
+       "Blocked a compatibility format interrupt request",
+       "Blocked an interrupt request due to source-id verification failure",
+};
+
+#define MAX_FAULT_REASON_IDX   (ARRAY_SIZE(fault_reason_strings) - 1)
+
+const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+{
+       if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
+                                    ARRAY_SIZE(intr_remap_fault_reasons))) {
+               *fault_type = INTR_REMAP;
+               return intr_remap_fault_reasons[fault_reason - 0x20];
+       } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
+               *fault_type = DMA_REMAP;
+               return dma_remap_fault_reasons[fault_reason];
+       } else {
+               *fault_type = UNKNOWN;
+               return "Unknown";
+       }
+}
+
+void dmar_msi_unmask(struct irq_data *data)
+{
+       struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
+       unsigned long flag;
+
+       /* unmask it */
+       spin_lock_irqsave(&iommu->register_lock, flag);
+       writel(0, iommu->reg + DMAR_FECTL_REG);
+       /* Read a reg to force flush the post write */
+       readl(iommu->reg + DMAR_FECTL_REG);
+       spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_mask(struct irq_data *data)
+{
+       unsigned long flag;
+       struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
+
+       /* mask it */
+       spin_lock_irqsave(&iommu->register_lock, flag);
+       writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+       /* Read a reg to force flush the post write */
+       readl(iommu->reg + DMAR_FECTL_REG);
+       spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_write(int irq, struct msi_msg *msg)
+{
+       struct intel_iommu *iommu = irq_get_handler_data(irq);
+       unsigned long flag;
+
+       spin_lock_irqsave(&iommu->register_lock, flag);
+       writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+       writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+       writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+       spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_read(int irq, struct msi_msg *msg)
+{
+       struct intel_iommu *iommu = irq_get_handler_data(irq);
+       unsigned long flag;
+
+       spin_lock_irqsave(&iommu->register_lock, flag);
+       msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+       msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+       msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+       spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
+               u8 fault_reason, u16 source_id, unsigned long long addr)
+{
+       const char *reason;
+       int fault_type;
+
+       reason = dmar_get_fault_reason(fault_reason, &fault_type);
+
+       if (fault_type == INTR_REMAP)
+               printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
+                      "fault index %llx\n"
+                       "INTR-REMAP:[fault reason %02d] %s\n",
+                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+                       PCI_FUNC(source_id & 0xFF), addr >> 48,
+                       fault_reason, reason);
+       else
+               printk(KERN_ERR
+                      "DMAR:[%s] Request device [%02x:%02x.%d] "
+                      "fault addr %llx \n"
+                      "DMAR:[fault reason %02d] %s\n",
+                      (type ? "DMA Read" : "DMA Write"),
+                      (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+                      PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+       return 0;
+}
+
+#define PRIMARY_FAULT_REG_LEN (16)
+irqreturn_t dmar_fault(int irq, void *dev_id)
+{
+       struct intel_iommu *iommu = dev_id;
+       int reg, fault_index;
+       u32 fault_status;
+       unsigned long flag;
+
+       spin_lock_irqsave(&iommu->register_lock, flag);
+       fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+       if (fault_status)
+               printk(KERN_ERR "DRHD: handling fault status reg %x\n",
+                      fault_status);
+
+       /* TBD: ignore advanced fault log currently */
+       if (!(fault_status & DMA_FSTS_PPF))
+               goto clear_rest;
+
+       fault_index = dma_fsts_fault_record_index(fault_status);
+       reg = cap_fault_reg_offset(iommu->cap);
+       while (1) {
+               u8 fault_reason;
+               u16 source_id;
+               u64 guest_addr;
+               int type;
+               u32 data;
+
+               /* highest 32 bits */
+               data = readl(iommu->reg + reg +
+                               fault_index * PRIMARY_FAULT_REG_LEN + 12);
+               if (!(data & DMA_FRCD_F))
+                       break;
+
+               fault_reason = dma_frcd_fault_reason(data);
+               type = dma_frcd_type(data);
+
+               data = readl(iommu->reg + reg +
+                               fault_index * PRIMARY_FAULT_REG_LEN + 8);
+               source_id = dma_frcd_source_id(data);
+
+               guest_addr = dmar_readq(iommu->reg + reg +
+                               fault_index * PRIMARY_FAULT_REG_LEN);
+               guest_addr = dma_frcd_page_addr(guest_addr);
+               /* clear the fault */
+               writel(DMA_FRCD_F, iommu->reg + reg +
+                       fault_index * PRIMARY_FAULT_REG_LEN + 12);
+
+               spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+               dmar_fault_do_one(iommu, type, fault_reason,
+                               source_id, guest_addr);
+
+               fault_index++;
+               if (fault_index >= cap_num_fault_regs(iommu->cap))
+                       fault_index = 0;
+               spin_lock_irqsave(&iommu->register_lock, flag);
+       }
+clear_rest:
+       /* clear all the other faults */
+       fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+       writel(fault_status, iommu->reg + DMAR_FSTS_REG);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flag);
+       return IRQ_HANDLED;
+}
+
+int dmar_set_interrupt(struct intel_iommu *iommu)
+{
+       int irq, ret;
+
+       /*
+        * Check if the fault interrupt is already initialized.
+        */
+       if (iommu->irq)
+               return 0;
+
+       irq = create_irq();
+       if (!irq) {
+               printk(KERN_ERR "IOMMU: no free vectors\n");
+               return -EINVAL;
+       }
+
+       irq_set_handler_data(irq, iommu);
+       iommu->irq = irq;
+
+       ret = arch_setup_dmar_msi(irq);
+       if (ret) {
+               irq_set_handler_data(irq, NULL);
+               iommu->irq = 0;
+               destroy_irq(irq);
+               return ret;
+       }
+
+       ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+       if (ret)
+               printk(KERN_ERR "IOMMU: can't request irq\n");
+       return ret;
+}
+
+int __init enable_drhd_fault_handling(void)
+{
+       struct dmar_drhd_unit *drhd;
+
+       /*
+        * Enable fault control interrupt.
+        */
+       for_each_drhd_unit(drhd) {
+               int ret;
+               struct intel_iommu *iommu = drhd->iommu;
+               ret = dmar_set_interrupt(iommu);
+
+               if (ret) {
+                       printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
+                              " interrupt, ret %d\n",
+                              (unsigned long long)drhd->reg_base_addr, ret);
+                       return -1;
+               }
+
+               /*
+                * Clear any previous faults.
+                */
+               dmar_fault(iommu->irq, iommu);
+       }
+
+       return 0;
+}
+
+/*
+ * Re-enable Queued Invalidation interface.
+ */
+int dmar_reenable_qi(struct intel_iommu *iommu)
+{
+       if (!ecap_qis(iommu->ecap))
+               return -ENOENT;
+
+       if (!iommu->qi)
+               return -ENOENT;
+
+       /*
+        * First disable queued invalidation.
+        */
+       dmar_disable_qi(iommu);
+       /*
+        * Then enable queued invalidation again. Since there is no pending
+        * invalidation requests now, it's safe to re-enable queued
+        * invalidation.
+        */
+       __dmar_enable_qi(iommu);
+
+       return 0;
+}
+
+/*
+ * Check interrupt remapping support in DMAR table description.
+ */
+int __init dmar_ir_support(void)
+{
+       struct acpi_table_dmar *dmar;
+       dmar = (struct acpi_table_dmar *)dmar_tbl;
+       if (!dmar)
+               return 0;
+       return dmar->flags & 0x1;
+}
+IOMMU_INIT_POST(detect_intel_iommu);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
new file mode 100644 (file)
index 0000000..c621c98
--- /dev/null
@@ -0,0 +1,4016 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Author: Ashok Raj <ashok.raj@intel.com>
+ * Author: Shaohua Li <shaohua.li@intel.com>
+ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/dmar.h>
+#include <linux/dma-mapping.h>
+#include <linux/mempool.h>
+#include <linux/timer.h>
+#include <linux/iova.h>
+#include <linux/iommu.h>
+#include <linux/intel-iommu.h>
+#include <linux/syscore_ops.h>
+#include <linux/tboot.h>
+#include <linux/dmi.h>
+#include <linux/pci-ats.h>
+#include <asm/cacheflush.h>
+#include <asm/iommu.h>
+
+#define ROOT_SIZE              VTD_PAGE_SIZE
+#define CONTEXT_SIZE           VTD_PAGE_SIZE
+
+#define IS_BRIDGE_HOST_DEVICE(pdev) \
+                           ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
+#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
+#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
+#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
+
+#define IOAPIC_RANGE_START     (0xfee00000)
+#define IOAPIC_RANGE_END       (0xfeefffff)
+#define IOVA_START_ADDR                (0x1000)
+
+#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
+
+#define MAX_AGAW_WIDTH 64
+
+#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
+
+/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
+   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
+#define DOMAIN_MAX_PFN(gaw)    ((unsigned long) min_t(uint64_t, \
+                               __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
+#define DOMAIN_MAX_ADDR(gaw)   (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
+
+#define IOVA_PFN(addr)         ((addr) >> PAGE_SHIFT)
+#define DMA_32BIT_PFN          IOVA_PFN(DMA_BIT_MASK(32))
+#define DMA_64BIT_PFN          IOVA_PFN(DMA_BIT_MASK(64))
+
+/* page table handling */
+#define LEVEL_STRIDE           (9)
+#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
+
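+/*
+ * AGAW encoding: agaw 0 is a 2-level table covering a 30-bit address width;
+ * each increment adds one 9-bit (LEVEL_STRIDE) page-table level.
+ */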
+static inline int agaw_to_level(int agaw)
+{
+       return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+       return 30 + agaw * LEVEL_STRIDE;
+}
+
+static inline int width_to_agaw(int width)
+{
+       return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+       return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(unsigned long pfn, int level)
+{
+       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline unsigned long level_mask(int level)
+{
+       return -1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long level_size(int level)
+{
+       return 1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long align_to_level(unsigned long pfn, int level)
+{
+       return (pfn + level_size(level) - 1) & level_mask(level);
+}
+
+static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
+{
+       return  1 << ((lvl - 1) * LEVEL_STRIDE);
+}
+
+/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
+   are never going to work. */
+static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
+{
+       return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+
+static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
+{
+       return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+static inline unsigned long page_to_dma_pfn(struct page *pg)
+{
+       return mm_to_dma_pfn(page_to_pfn(pg));
+}
+static inline unsigned long virt_to_dma_pfn(void *p)
+{
+       return page_to_dma_pfn(virt_to_page(p));
+}
+
+/* global iommu list, set NULL for ignored DMAR units */
+static struct intel_iommu **g_iommus;
+
+static void __init check_tylersburg_isoch(void);
+static int rwbf_quirk;
+
+/*
+ * set to 1 to panic the kernel if VT-d can't be successfully enabled
+ * (used when the kernel is launched w/ TXT)
+ */
+static int force_on = 0;
+
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+       u64     val;
+       u64     rsvd1;
+};
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
+static inline bool root_present(struct root_entry *root)
+{
+       return (root->val & 1);
+}
+static inline void set_root_present(struct root_entry *root)
+{
+       root->val |= 1;
+}
+static inline void set_root_value(struct root_entry *root, unsigned long value)
+{
+       root->val |= value & VTD_PAGE_MASK;
+}
+
+static inline struct context_entry *
+get_context_addr_from_root(struct root_entry *root)
+{
+       return (struct context_entry *)
+               (root_present(root) ?
+                phys_to_virt(root->val & VTD_PAGE_MASK) : NULL);
+}
+
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: avail
+ * 8-23: domain id
+ */
+struct context_entry {
+       u64 lo;
+       u64 hi;
+};
+
+static inline bool context_present(struct context_entry *context)
+{
+       return (context->lo & 1);
+}
+static inline void context_set_present(struct context_entry *context)
+{
+       context->lo |= 1;
+}
+
+static inline void context_set_fault_enable(struct context_entry *context)
+{
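+       /* clear bit 1 (fault processing disable) so faults are reported */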
+       context->lo &= (((u64)-1) << 2) | 1;
+}
+
+static inline void context_set_translation_type(struct context_entry *context,
+                                               unsigned long value)
+{
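+       /* bits 2-3 hold the translation type: clear them, then set the new value */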
+       context->lo &= (((u64)-1) << 4) | 3;
+       context->lo |= (value & 3) << 2;
+}
+
+static inline void context_set_address_root(struct context_entry *context,
+                                           unsigned long value)
+{
+       context->lo |= value & VTD_PAGE_MASK;
+}
+
+static inline void context_set_address_width(struct context_entry *context,
+                                            unsigned long value)
+{
+       context->hi |= value & 7;
+}
+
+static inline void context_set_domain_id(struct context_entry *context,
+                                        unsigned long value)
+{
+       context->hi |= (value & ((1 << 16) - 1)) << 8;
+}
+
+static inline void context_clear_entry(struct context_entry *context)
+{
+       context->lo = 0;
+       context->hi = 0;
+}
+
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-10: available
+ * 11: snoop behavior
+ * 12-63: Host physical address
+ */
+struct dma_pte {
+       u64 val;
+};
+
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+       pte->val = 0;
+}
+
+static inline void dma_set_pte_readable(struct dma_pte *pte)
+{
+       pte->val |= DMA_PTE_READ;
+}
+
+static inline void dma_set_pte_writable(struct dma_pte *pte)
+{
+       pte->val |= DMA_PTE_WRITE;
+}
+
+static inline void dma_set_pte_snp(struct dma_pte *pte)
+{
+       pte->val |= DMA_PTE_SNP;
+}
+
+static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
+{
+       pte->val = (pte->val & ~3) | (prot & 3);
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+#ifdef CONFIG_64BIT
+       return pte->val & VTD_PAGE_MASK;
+#else
+       /* Must have a full atomic 64-bit read */
+       return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
+#endif
+}
+
+static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
+{
+       pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+       return (pte->val & 3) != 0;
+}
+
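+/* true when pte is the first entry in its page-table page (VT-d page aligned) */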
+static inline int first_pte_in_page(struct dma_pte *pte)
+{
+       return !((unsigned long)pte & ~VTD_PAGE_MASK);
+}
+
+/*
+ * This domain is a static identity mapping domain.
+ *     1. This domain creates a static 1:1 mapping to all usable memory.
+ *     2. It maps to each iommu if successful.
+ *     3. Each iommu maps to this domain if successful.
+ */
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;
+
+/* devices under the same p2p bridge are owned in one domain */
+#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
+
+/* The domain represents a virtual machine; more than one device
+ * across iommus may be owned by one domain, e.g. a kvm guest.
+ */
+#define DOMAIN_FLAG_VIRTUAL_MACHINE    (1 << 1)
+
+/* si_domain contains multiple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY    (1 << 2)
+
+struct dmar_domain {
+       int     id;                     /* domain id */
+       int     nid;                    /* node id */
+       unsigned long iommu_bmp;        /* bitmap of iommus this domain uses*/
+
+       struct list_head devices;       /* all devices' list */
+       struct iova_domain iovad;       /* iova's that belong to this domain */
+
+       struct dma_pte  *pgd;           /* virtual address */
+       int             gaw;            /* max guest address width */
+
+       /* adjusted guest address width, 0 is level 2 30-bit */
+       int             agaw;
+
+       int             flags;          /* flags to find out type of domain */
+
+       int             iommu_coherency;/* indicate coherency of iommu access */
+       int             iommu_snooping; /* indicate snooping control feature*/
+       int             iommu_count;    /* reference count of iommu */
+       int             iommu_superpage;/* Level of superpages supported:
+                                          0 == 4KiB (no superpages), 1 == 2MiB,
+                                          2 == 1GiB, 3 == 512GiB, 4 == 256TiB */
+       spinlock_t      iommu_lock;     /* protect iommu set in domain */
+       u64             max_addr;       /* maximum mapped address */
+};
+
+/* PCI domain-device relationship */
+struct device_domain_info {
+       struct list_head link;  /* link to domain siblings */
+       struct list_head global; /* link to global list */
+       int segment;            /* PCI domain */
+       u8 bus;                 /* PCI bus number */
+       u8 devfn;               /* PCI devfn number */
+       struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
+       struct intel_iommu *iommu; /* IOMMU used by this device */
+       struct dmar_domain *domain; /* pointer to domain */
+};
+
+static void flush_unmaps_timeout(unsigned long data);
+
+DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
+
+#define HIGH_WATER_MARK 250
+struct deferred_flush_tables {
+       int next;
+       struct iova *iova[HIGH_WATER_MARK];
+       struct dmar_domain *domain[HIGH_WATER_MARK];
+};
+
+static struct deferred_flush_tables *deferred_flush;
+
+/* number of intel_iommus, used to size g_iommus and per-domain iommu bitmaps */
+static int g_num_of_iommus;
+
+static DEFINE_SPINLOCK(async_umap_flush_lock);
+static LIST_HEAD(unmaps_to_do);
+
+static int timer_on;
+static long list_size;
+
+static void domain_remove_dev_info(struct dmar_domain *domain);
+
+#ifdef CONFIG_DMAR_DEFAULT_ON
+int dmar_disabled = 0;
+#else
+int dmar_disabled = 1;
+#endif /*CONFIG_DMAR_DEFAULT_ON*/
+
+static int dmar_map_gfx = 1;
+static int dmar_forcedac;
+static int intel_iommu_strict;
+static int intel_iommu_superpage = 1;
+
+#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
+static DEFINE_SPINLOCK(device_domain_lock);
+static LIST_HEAD(device_domain_list);
+
+static struct iommu_ops intel_iommu_ops;
+
+static int __init intel_iommu_setup(char *str)
+{
+       if (!str)
+               return -EINVAL;
+       while (*str) {
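+               /* options are comma-separated, e.g. "intel_iommu=on,strict" */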
+               if (!strncmp(str, "on", 2)) {
+                       dmar_disabled = 0;
+                       printk(KERN_INFO "Intel-IOMMU: enabled\n");
+               } else if (!strncmp(str, "off", 3)) {
+                       dmar_disabled = 1;
+                       printk(KERN_INFO "Intel-IOMMU: disabled\n");
+               } else if (!strncmp(str, "igfx_off", 8)) {
+                       dmar_map_gfx = 0;
+                       printk(KERN_INFO
+                               "Intel-IOMMU: disable GFX device mapping\n");
+               } else if (!strncmp(str, "forcedac", 8)) {
+                       printk(KERN_INFO
+                               "Intel-IOMMU: Forcing DAC for PCI devices\n");
+                       dmar_forcedac = 1;
+               } else if (!strncmp(str, "strict", 6)) {
+                       printk(KERN_INFO
+                               "Intel-IOMMU: disable batched IOTLB flush\n");
+                       intel_iommu_strict = 1;
+               } else if (!strncmp(str, "sp_off", 6)) {
+                       printk(KERN_INFO
+                               "Intel-IOMMU: disable supported super page\n");
+                       intel_iommu_superpage = 0;
+               }
+
+               str += strcspn(str, ",");
+               while (*str == ',')
+                       str++;
+       }
+       return 0;
+}
+__setup("intel_iommu=", intel_iommu_setup);
+
+static struct kmem_cache *iommu_domain_cache;
+static struct kmem_cache *iommu_devinfo_cache;
+static struct kmem_cache *iommu_iova_cache;
+
+static inline void *alloc_pgtable_page(int node)
+{
+       struct page *page;
+       void *vaddr = NULL;
+
+       page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+       if (page)
+               vaddr = page_address(page);
+       return vaddr;
+}
+
+static inline void free_pgtable_page(void *vaddr)
+{
+       free_page((unsigned long)vaddr);
+}
+
+static inline void *alloc_domain_mem(void)
+{
+       return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
+}
+
+static void free_domain_mem(void *vaddr)
+{
+       kmem_cache_free(iommu_domain_cache, vaddr);
+}
+
+static inline void * alloc_devinfo_mem(void)
+{
+       return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
+}
+
+static inline void free_devinfo_mem(void *vaddr)
+{
+       kmem_cache_free(iommu_devinfo_cache, vaddr);
+}
+
+struct iova *alloc_iova_mem(void)
+{
+       return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
+}
+
+void free_iova_mem(struct iova *iova)
+{
+       kmem_cache_free(iommu_iova_cache, iova);
+}
+
+
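+/* Pick the largest AGAW supported by this iommu that does not exceed max_gaw */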
+static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
+{
+       unsigned long sagaw;
+       int agaw = -1;
+
+       sagaw = cap_sagaw(iommu->cap);
+       for (agaw = width_to_agaw(max_gaw);
+            agaw >= 0; agaw--) {
+               if (test_bit(agaw, &sagaw))
+                       break;
+       }
+
+       return agaw;
+}
+
+/*
+ * Calculate max SAGAW for each iommu.
+ */
+int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+       return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
+}
+
+/*
+ * calculate agaw for each iommu.
+ * "SAGAW" may be different across iommus, use a default agaw, and
+ * get a supported less agaw for iommus that don't support the default agaw.
+ */
+int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+       return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+}
+
+/* This function only returns a single iommu in a domain */
+static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
+{
+       int iommu_id;
+
+       /* si_domain and vm domain should not get here. */
+       BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
+       BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
+
+       iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+       if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
+               return NULL;
+
+       return g_iommus[iommu_id];
+}
+
+static void domain_update_iommu_coherency(struct dmar_domain *domain)
+{
+       int i;
+
+       domain->iommu_coherency = 1;
+
+       for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+               if (!ecap_coherent(g_iommus[i]->ecap)) {
+                       domain->iommu_coherency = 0;
+                       break;
+               }
+       }
+}
+
+static void domain_update_iommu_snooping(struct dmar_domain *domain)
+{
+       int i;
+
+       domain->iommu_snooping = 1;
+
+       for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+               if (!ecap_sc_support(g_iommus[i]->ecap)) {
+                       domain->iommu_snooping = 0;
+                       break;
+               }
+       }
+}
+
+static void domain_update_iommu_superpage(struct dmar_domain *domain)
+{
+       int i, mask = 0xf;
+
+       if (!intel_iommu_superpage) {
+               domain->iommu_superpage = 0;
+               return;
+       }
+
+       domain->iommu_superpage = 4; /* 256TiB */
+
+       for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+               mask &= cap_super_page_val(g_iommus[i]->cap);
+               if (!mask) {
+                       break;
+               }
+       }
+       domain->iommu_superpage = fls(mask);
+}
+
+/* Some capabilities may be different across iommus */
+static void domain_update_iommu_cap(struct dmar_domain *domain)
+{
+       domain_update_iommu_coherency(domain);
+       domain_update_iommu_snooping(domain);
+       domain_update_iommu_superpage(domain);
+}
+
+static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
+{
+       struct dmar_drhd_unit *drhd = NULL;
+       int i;
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+               if (segment != drhd->segment)
+                       continue;
+
+               for (i = 0; i < drhd->devices_cnt; i++) {
+                       if (drhd->devices[i] &&
+                           drhd->devices[i]->bus->number == bus &&
+                           drhd->devices[i]->devfn == devfn)
+                               return drhd->iommu;
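+                       /* also match bridges whose secondary bus range covers 'bus' */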
+                       if (drhd->devices[i] &&
+                           drhd->devices[i]->subordinate &&
+                           drhd->devices[i]->subordinate->number <= bus &&
+                           drhd->devices[i]->subordinate->subordinate >= bus)
+                               return drhd->iommu;
+               }
+
+               if (drhd->include_all)
+                       return drhd->iommu;
+       }
+
+       return NULL;
+}
+
+static void domain_flush_cache(struct dmar_domain *domain,
+                              void *addr, int size)
+{
+       if (!domain->iommu_coherency)
+               clflush_cache_range(addr, size);
+}
+
+/* Gets context entry for a given bus and devfn */
+static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
+               u8 bus, u8 devfn)
+{
+       struct root_entry *root;
+       struct context_entry *context;
+       unsigned long phy_addr;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iommu->lock, flags);
+       root = &iommu->root_entry[bus];
+       context = get_context_addr_from_root(root);
+       if (!context) {
+               context = (struct context_entry *)
+                               alloc_pgtable_page(iommu->node);
+               if (!context) {
+                       spin_unlock_irqrestore(&iommu->lock, flags);
+                       return NULL;
+               }
+               __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
+               phy_addr = virt_to_phys((void *)context);
+               set_root_value(root, phy_addr);
+               set_root_present(root);
+               __iommu_flush_cache(iommu, root, sizeof(*root));
+       }
+       spin_unlock_irqrestore(&iommu->lock, flags);
+       return &context[devfn];
+}
+
+static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+       struct root_entry *root;
+       struct context_entry *context;
+       int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iommu->lock, flags);
+       root = &iommu->root_entry[bus];
+       context = get_context_addr_from_root(root);
+       if (!context) {
+               ret = 0;
+               goto out;
+       }
+       ret = context_present(&context[devfn]);
+out:
+       spin_unlock_irqrestore(&iommu->lock, flags);
+       return ret;
+}
+
+static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+       struct root_entry *root;
+       struct context_entry *context;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iommu->lock, flags);
+       root = &iommu->root_entry[bus];
+       context = get_context_addr_from_root(root);
+       if (context) {
+               context_clear_entry(&context[devfn]);
+               __iommu_flush_cache(iommu, &context[devfn],
+                                   sizeof(*context));
+       }
+       spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static void free_context_table(struct intel_iommu *iommu)
+{
+       struct root_entry *root;
+       int i;
+       unsigned long flags;
+       struct context_entry *context;
+
+       spin_lock_irqsave(&iommu->lock, flags);
+       if (!iommu->root_entry) {
+               goto out;
+       }
+       for (i = 0; i < ROOT_ENTRY_NR; i++) {
+               root = &iommu->root_entry[i];
+               context = get_context_addr_from_root(root);
+               if (context)
+                       free_pgtable_page(context);
+       }
+       free_pgtable_page(iommu->root_entry);
+       iommu->root_entry = NULL;
+out:
+       spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
+                                     unsigned long pfn, int large_level)
+{
+       int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+       struct dma_pte *parent, *pte = NULL;
+       int level = agaw_to_level(domain->agaw);
+       int offset, target_level;
+
+       BUG_ON(!domain->pgd);
+       BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
+       parent = domain->pgd;
+
+       /* Search pte: stop at level 1 for a 4KiB PTE, or at large_level for a superpage */
+       if (!large_level)
+               target_level = 1;
+       else
+               target_level = large_level;
+
+       while (level > 0) {
+               void *tmp_page;
+
+               offset = pfn_level_offset(pfn, level);
+               pte = &parent[offset];
+               if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+                       break;
+               if (level == target_level)
+                       break;
+
+               if (!dma_pte_present(pte)) {
+                       uint64_t pteval;
+
+                       tmp_page = alloc_pgtable_page(domain->nid);
+
+                       if (!tmp_page)
+                               return NULL;
+
+                       domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
+                       pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+                       if (cmpxchg64(&pte->val, 0ULL, pteval)) {
+                               /* Someone else set it while we were thinking; use theirs. */
+                               free_pgtable_page(tmp_page);
+                       } else {
+                               dma_pte_addr(pte);
+                               domain_flush_cache(domain, pte, sizeof(*pte));
+                       }
+               }
+               parent = phys_to_virt(dma_pte_addr(pte));
+               level--;
+       }
+
+       return pte;
+}
+
+
+/* return address's pte at specific level */
+static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
+                                        unsigned long pfn,
+                                        int level, int *large_page)
+{
+       struct dma_pte *parent, *pte = NULL;
+       int total = agaw_to_level(domain->agaw);
+       int offset;
+
+       parent = domain->pgd;
+       while (level <= total) {
+               offset = pfn_level_offset(pfn, total);
+               pte = &parent[offset];
+               if (level == total)
+                       return pte;
+
+               if (!dma_pte_present(pte)) {
+                       *large_page = total;
+                       break;
+               }
+
+               if (pte->val & DMA_PTE_LARGE_PAGE) {
+                       *large_page = total;
+                       return pte;
+               }
+
+               parent = phys_to_virt(dma_pte_addr(pte));
+               total--;
+       }
+       return NULL;
+}
+
+/* clear last level pte, a tlb flush should follow */
+static void dma_pte_clear_range(struct dmar_domain *domain,
+                               unsigned long start_pfn,
+                               unsigned long last_pfn)
+{
+       int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+       unsigned int large_page = 1;
+       struct dma_pte *first_pte, *pte;
+
+       BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+       BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+       BUG_ON(start_pfn > last_pfn);
+
+       /* we don't need lock here; nobody else touches the iova range */
+       do {
+               large_page = 1;
+               first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
+               if (!pte) {
+                       start_pfn = align_to_level(start_pfn + 1, large_page + 1);
+                       continue;
+               }
+               do {
+                       dma_clear_pte(pte);
+                       start_pfn += lvl_to_nr_pages(large_page);
+                       pte++;
+               } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
+
+               domain_flush_cache(domain, first_pte,
+                                  (void *)pte - (void *)first_pte);
+
+       } while (start_pfn && start_pfn <= last_pfn);
+}
+
+/* free page table pages. last level pte should already be cleared */
+static void dma_pte_free_pagetable(struct dmar_domain *domain,
+                                  unsigned long start_pfn,
+                                  unsigned long last_pfn)
+{
+       int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+       struct dma_pte *first_pte, *pte;
+       int total = agaw_to_level(domain->agaw);
+       int level;
+       unsigned long tmp;
+       int large_page = 2;
+
+       BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+       BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+       BUG_ON(start_pfn > last_pfn);
+
+       /* We don't need lock here; nobody else touches the iova range */
+       level = 2;
+       while (level <= total) {
+               tmp = align_to_level(start_pfn, level);
+
+               /* If we can't even clear one PTE at this level, we're done */
+               if (tmp + level_size(level) - 1 > last_pfn)
+                       return;
+
+               do {
+                       large_page = level;
+                       first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
+                       if (large_page > level)
+                               level = large_page + 1;
+                       if (!pte) {
+                               tmp = align_to_level(tmp + 1, level + 1);
+                               continue;
+                       }
+                       do {
+                               if (dma_pte_present(pte)) {
+                                       free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
+                                       dma_clear_pte(pte);
+                               }
+                               pte++;
+                               tmp += level_size(level);
+                       } while (!first_pte_in_page(pte) &&
+                                tmp + level_size(level) - 1 <= last_pfn);
+
+                       domain_flush_cache(domain, first_pte,
+                                          (void *)pte - (void *)first_pte);
+
+               } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
+               level++;
+       }
+       /* free pgd */
+       if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
+               free_pgtable_page(domain->pgd);
+               domain->pgd = NULL;
+       }
+}
+
+/* iommu handling */
+static int iommu_alloc_root_entry(struct intel_iommu *iommu)
+{
+       struct root_entry *root;
+       unsigned long flags;
+
+       root = (struct root_entry *)alloc_pgtable_page(iommu->node);
+       if (!root)
+               return -ENOMEM;
+
+       __iommu_flush_cache(iommu, root, ROOT_SIZE);
+
+       spin_lock_irqsave(&iommu->lock, flags);
+       iommu->root_entry = root;
+       spin_unlock_irqrestore(&iommu->lock, flags);
+
+       return 0;
+}
+
+static void iommu_set_root_entry(struct intel_iommu *iommu)
+{
+       void *addr;
+       u32 sts;
+       unsigned long flag;
+
+       addr = iommu->root_entry;
+
+       spin_lock_irqsave(&iommu->register_lock, flag);
+       dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
+
+       writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
+
+       /* Make sure hardware completes it */
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+                     readl, (sts & DMA_GSTS_RTPS), sts);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+static void iommu_flush_write_buffer(struct intel_iommu *iommu)
+{
+       u32 val;
+       unsigned long flag;
+
+       if (!rwbf_quirk && !cap_rwbf(iommu->cap))
+               return;
+
+       spin_lock_irqsave(&iommu->register_lock, flag);
+       writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
+
+       /* Make sure hardware completes it */
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+                     readl, (!(val & DMA_GSTS_WBFS)), val);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+/* return value determines if we need a write buffer flush */
+static void __iommu_flush_context(struct intel_iommu *iommu,
+                                 u16 did, u16 source_id, u8 function_mask,
+                                 u64 type)
+{
+       u64 val = 0;
+       unsigned long flag;
+
+       switch (type) {
+       case DMA_CCMD_GLOBAL_INVL:
+               val = DMA_CCMD_GLOBAL_INVL;
+               break;
+       case DMA_CCMD_DOMAIN_INVL:
+               val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
+               break;
+       case DMA_CCMD_DEVICE_INVL:
+               val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
+                       | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
+               break;
+       default:
+               BUG();
+       }
+       val |= DMA_CCMD_ICC;
+
+       spin_lock_irqsave(&iommu->register_lock, flag);
+       dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
+
+       /* Make sure hardware completes it */
+       IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
+               dmar_readq, (!(val & DMA_CCMD_ICC)), val);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+/* return value determines if we need a write buffer flush */
+static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
+                               u64 addr, unsigned int size_order, u64 type)
+{
+       int tlb_offset = ecap_iotlb_offset(iommu->ecap);
+       u64 val = 0, val_iva = 0;
+       unsigned long flag;
+
+       switch (type) {
+       case DMA_TLB_GLOBAL_FLUSH:
+               /* global flush doesn't need to set IVA_REG */
+               val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
+               break;
+       case DMA_TLB_DSI_FLUSH:
+               val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
+               break;
+       case DMA_TLB_PSI_FLUSH:
+               val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
+               /* Note: always flush non-leaf currently */
+               val_iva = size_order | addr;
+               break;
+       default:
+               BUG();
+       }
+       /* Note: set drain read/write */
+#if 0
+       /*
+        * This is probably meant to be extra safe. It looks like we can
+        * ignore it without any impact.
+        */
+       if (cap_read_drain(iommu->cap))
+               val |= DMA_TLB_READ_DRAIN;
+#endif
+       if (cap_write_drain(iommu->cap))
+               val |= DMA_TLB_WRITE_DRAIN;
+
+       spin_lock_irqsave(&iommu->register_lock, flag);
+       /* Note: Only uses first TLB reg currently */
+       if (val_iva)
+               dmar_writeq(iommu->reg + tlb_offset, val_iva);
+       dmar_writeq(iommu->reg + tlb_offset + 8, val);
+
+       /* Make sure hardware completes it */
+       IOMMU_WAIT_OP(iommu, tlb_offset + 8,
+               dmar_readq, (!(val & DMA_TLB_IVT)), val);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+       /* check IOTLB invalidation granularity */
+       if (DMA_TLB_IAIG(val) == 0)
+               printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
+       if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
+               pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
+                       (unsigned long long)DMA_TLB_IIRG(type),
+                       (unsigned long long)DMA_TLB_IAIG(val));
+}
+
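+/*
+ * Device-IOTLB (ATS) is usable only if the IOMMU supports it and has queued
+ * invalidation, and the device advertises the ATS capability and is covered
+ * by a matching ATSR unit.
+ */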
+static struct device_domain_info *iommu_support_dev_iotlb(
+       struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
+{
+       int found = 0;
+       unsigned long flags;
+       struct device_domain_info *info;
+       struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
+
+       if (!ecap_dev_iotlb_support(iommu->ecap))
+               return NULL;
+
+       if (!iommu->qi)
+               return NULL;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_for_each_entry(info, &domain->devices, link)
+               if (info->bus == bus && info->devfn == devfn) {
+                       found = 1;
+                       break;
+               }
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       if (!found || !info->dev)
+               return NULL;
+
+       if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
+               return NULL;
+
+       if (!dmar_find_matched_atsr_unit(info->dev))
+               return NULL;
+
+       info->iommu = iommu;
+
+       return info;
+}
+
+static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+{
+       if (!info)
+               return;
+
+       pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
+}
+
+static void iommu_disable_dev_iotlb(struct device_domain_info *info)
+{
+       if (!info->dev || !pci_ats_enabled(info->dev))
+               return;
+
+       pci_disable_ats(info->dev);
+}
+
+static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
+                                 u64 addr, unsigned mask)
+{
+       u16 sid, qdep;
+       unsigned long flags;
+       struct device_domain_info *info;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_for_each_entry(info, &domain->devices, link) {
+               if (!info->dev || !pci_ats_enabled(info->dev))
+                       continue;
+
+               sid = info->bus << 8 | info->devfn;
+               qdep = pci_ats_queue_depth(info->dev);
+               qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
+       }
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+                                 unsigned long pfn, unsigned int pages, int map)
+{
+       unsigned int mask = ilog2(__roundup_pow_of_two(pages));
+       uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
+
+       BUG_ON(pages == 0);
+
+       /*
+        * Fall back to domain-selective flush if there is no PSI support or
+        * the size is too big.
+        * PSI requires the page size to be 2 ^ x, and the base address to be
+        * naturally aligned to the size.
+        */
+       if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+               iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                                               DMA_TLB_DSI_FLUSH);
+       else
+               iommu->flush.flush_iotlb(iommu, did, addr, mask,
+                                               DMA_TLB_PSI_FLUSH);
+
+       /*
+        * In caching mode, changes of pages from non-present to present
+        * require a flush. However, the device IOTLB doesn't need to be
+        * flushed in this case.
+        */
+       if (!cap_caching_mode(iommu->cap) || !map)
+               iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
+}
+
+static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
+{
+       u32 pmen;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iommu->register_lock, flags);
+       pmen = readl(iommu->reg + DMAR_PMEN_REG);
+       pmen &= ~DMA_PMEN_EPM;
+       writel(pmen, iommu->reg + DMAR_PMEN_REG);
+
+       /* wait for the protected region status bit to clear */
+       IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
+               readl, !(pmen & DMA_PMEN_PRS), pmen);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+static int iommu_enable_translation(struct intel_iommu *iommu)
+{
+       u32 sts;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iommu->register_lock, flags);
+       iommu->gcmd |= DMA_GCMD_TE;
+       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+       /* Make sure hardware completes it */
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+                     readl, (sts & DMA_GSTS_TES), sts);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flags);
+       return 0;
+}
+
+static int iommu_disable_translation(struct intel_iommu *iommu)
+{
+       u32 sts;
+       unsigned long flag;
+
+       spin_lock_irqsave(&iommu->register_lock, flag);
+       iommu->gcmd &= ~DMA_GCMD_TE;
+       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+       /* Make sure hardware completes it */
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+                     readl, (!(sts & DMA_GSTS_TES)), sts);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flag);
+       return 0;
+}
+
+
+static int iommu_init_domains(struct intel_iommu *iommu)
+{
+       unsigned long ndomains;
+       unsigned long nlongs;
+
+       ndomains = cap_ndoms(iommu->cap);
+       pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
+                       ndomains);
+       nlongs = BITS_TO_LONGS(ndomains);
+
+       spin_lock_init(&iommu->lock);
+
+       /* TBD: there might be 64K domains,
+        * consider a different allocation scheme for future chips
+        */
+       iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
+       if (!iommu->domain_ids) {
+               printk(KERN_ERR "Allocating domain id array failed\n");
+               return -ENOMEM;
+       }
+       iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
+                       GFP_KERNEL);
+       if (!iommu->domains) {
+               printk(KERN_ERR "Allocating domain array failed\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * If caching mode is set, then invalid translations are tagged
+        * with domain id 0. Hence we need to pre-allocate it.
+        */
+       if (cap_caching_mode(iommu->cap))
+               set_bit(0, iommu->domain_ids);
+       return 0;
+}
+
+
+static void domain_exit(struct dmar_domain *domain);
+static void vm_domain_exit(struct dmar_domain *domain);
+
+void free_dmar_iommu(struct intel_iommu *iommu)
+{
+       struct dmar_domain *domain;
+       int i;
+       unsigned long flags;
+
+       if ((iommu->domains) && (iommu->domain_ids)) {
+               for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
+                       domain = iommu->domains[i];
+                       clear_bit(i, iommu->domain_ids);
+
+                       spin_lock_irqsave(&domain->iommu_lock, flags);
+                       if (--domain->iommu_count == 0) {
+                               if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                                       vm_domain_exit(domain);
+                               else
+                                       domain_exit(domain);
+                       }
+                       spin_unlock_irqrestore(&domain->iommu_lock, flags);
+               }
+       }
+
+       if (iommu->gcmd & DMA_GCMD_TE)
+               iommu_disable_translation(iommu);
+
+       if (iommu->irq) {
+               irq_set_handler_data(iommu->irq, NULL);
+               /* This will mask the irq */
+               free_irq(iommu->irq, iommu);
+               destroy_irq(iommu->irq);
+       }
+
+       kfree(iommu->domains);
+       kfree(iommu->domain_ids);
+
+       g_iommus[iommu->seq_id] = NULL;
+
+       /* if all iommus are freed, free g_iommus */
+       for (i = 0; i < g_num_of_iommus; i++) {
+               if (g_iommus[i])
+                       break;
+       }
+
+       if (i == g_num_of_iommus)
+               kfree(g_iommus);
+
+       /* free context mapping */
+       free_context_table(iommu);
+}
+
+static struct dmar_domain *alloc_domain(void)
+{
+       struct dmar_domain *domain;
+
+       domain = alloc_domain_mem();
+       if (!domain)
+               return NULL;
+
+       domain->nid = -1;
+       memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+       domain->flags = 0;
+
+       return domain;
+}
+
+static int iommu_attach_domain(struct dmar_domain *domain,
+                              struct intel_iommu *iommu)
+{
+       int num;
+       unsigned long ndomains;
+       unsigned long flags;
+
+       ndomains = cap_ndoms(iommu->cap);
+
+       spin_lock_irqsave(&iommu->lock, flags);
+
+       num = find_first_zero_bit(iommu->domain_ids, ndomains);
+       if (num >= ndomains) {
+               spin_unlock_irqrestore(&iommu->lock, flags);
+               printk(KERN_ERR "IOMMU: no free domain ids\n");
+               return -ENOMEM;
+       }
+
+       domain->id = num;
+       set_bit(num, iommu->domain_ids);
+       set_bit(iommu->seq_id, &domain->iommu_bmp);
+       iommu->domains[num] = domain;
+       spin_unlock_irqrestore(&iommu->lock, flags);
+
+       return 0;
+}
+
+static void iommu_detach_domain(struct dmar_domain *domain,
+                               struct intel_iommu *iommu)
+{
+       unsigned long flags;
+       int num, ndomains;
+       int found = 0;
+
+       spin_lock_irqsave(&iommu->lock, flags);
+       ndomains = cap_ndoms(iommu->cap);
+       for_each_set_bit(num, iommu->domain_ids, ndomains) {
+               if (iommu->domains[num] == domain) {
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (found) {
+               clear_bit(num, iommu->domain_ids);
+               clear_bit(iommu->seq_id, &domain->iommu_bmp);
+               iommu->domains[num] = NULL;
+       }
+       spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static struct iova_domain reserved_iova_list;
+static struct lock_class_key reserved_rbtree_key;
+
+static int dmar_init_reserved_ranges(void)
+{
+       struct pci_dev *pdev = NULL;
+       struct iova *iova;
+       int i;
+
+       init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
+
+       lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
+               &reserved_rbtree_key);
+
+       /* IOAPIC ranges shouldn't be accessed by DMA */
+       iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
+               IOVA_PFN(IOAPIC_RANGE_END));
+       if (!iova) {
+               printk(KERN_ERR "Reserve IOAPIC range failed\n");
+               return -ENODEV;
+       }
+
+       /* Reserve all PCI MMIO to avoid peer-to-peer access */
+       for_each_pci_dev(pdev) {
+               struct resource *r;
+
+               for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+                       r = &pdev->resource[i];
+                       if (!r->flags || !(r->flags & IORESOURCE_MEM))
+                               continue;
+                       iova = reserve_iova(&reserved_iova_list,
+                                           IOVA_PFN(r->start),
+                                           IOVA_PFN(r->end));
+                       if (!iova) {
+                               printk(KERN_ERR "Reserve iova failed\n");
+                               return -ENODEV;
+                       }
+               }
+       }
+       return 0;
+}
+
+static void domain_reserve_special_ranges(struct dmar_domain *domain)
+{
+       copy_reserved_iova(&reserved_iova_list, &domain->iovad);
+}
+
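+/* Round gaw up so that (gaw - 12) is a multiple of the 9-bit stride, capped at 64 */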
+static inline int guestwidth_to_adjustwidth(int gaw)
+{
+       int agaw;
+       int r = (gaw - 12) % 9;
+
+       if (r == 0)
+               agaw = gaw;
+       else
+               agaw = gaw + 9 - r;
+       if (agaw > 64)
+               agaw = 64;
+       return agaw;
+}
+
+static int domain_init(struct dmar_domain *domain, int guest_width)
+{
+       struct intel_iommu *iommu;
+       int adjust_width, agaw;
+       unsigned long sagaw;
+
+       init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+       spin_lock_init(&domain->iommu_lock);
+
+       domain_reserve_special_ranges(domain);
+
+       /* calculate AGAW */
+       iommu = domain_get_iommu(domain);
+       if (guest_width > cap_mgaw(iommu->cap))
+               guest_width = cap_mgaw(iommu->cap);
+       domain->gaw = guest_width;
+       adjust_width = guestwidth_to_adjustwidth(guest_width);
+       agaw = width_to_agaw(adjust_width);
+       sagaw = cap_sagaw(iommu->cap);
+       if (!test_bit(agaw, &sagaw)) {
+               /* hardware doesn't support it, choose a bigger one */
+               pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
+               agaw = find_next_bit(&sagaw, 5, agaw);
+               if (agaw >= 5)
+                       return -ENODEV;
+       }
+       domain->agaw = agaw;
+       INIT_LIST_HEAD(&domain->devices);
+
+       if (ecap_coherent(iommu->ecap))
+               domain->iommu_coherency = 1;
+       else
+               domain->iommu_coherency = 0;
+
+       if (ecap_sc_support(iommu->ecap))
+               domain->iommu_snooping = 1;
+       else
+               domain->iommu_snooping = 0;
+
+       domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
+       domain->iommu_count = 1;
+       domain->nid = iommu->node;
+
+       /* always allocate the top pgd */
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+       if (!domain->pgd)
+               return -ENOMEM;
+       __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
+       return 0;
+}
+
+static void domain_exit(struct dmar_domain *domain)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+
+       /* Domain 0 is reserved, so don't process it */
+       if (!domain)
+               return;
+
+       /* Flush any lazy unmaps that may reference this domain */
+       if (!intel_iommu_strict)
+               flush_unmaps_timeout(0);
+
+       domain_remove_dev_info(domain);
+       /* destroy iovas */
+       put_iova_domain(&domain->iovad);
+
+       /* clear ptes */
+       dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+
+       /* free page tables */
+       dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+
+       for_each_active_iommu(iommu, drhd)
+               if (test_bit(iommu->seq_id, &domain->iommu_bmp))
+                       iommu_detach_domain(domain, iommu);
+
+       free_domain_mem(domain);
+}
+
+static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
+                                u8 bus, u8 devfn, int translation)
+{
+       struct context_entry *context;
+       unsigned long flags;
+       struct intel_iommu *iommu;
+       struct dma_pte *pgd;
+       unsigned long num;
+       unsigned long ndomains;
+       int id;
+       int agaw;
+       struct device_domain_info *info = NULL;
+
+       pr_debug("Set context mapping for %02x:%02x.%d\n",
+               bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+       BUG_ON(!domain->pgd);
+       BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
+              translation != CONTEXT_TT_MULTI_LEVEL);
+
+       iommu = device_to_iommu(segment, bus, devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       context = device_to_context_entry(iommu, bus, devfn);
+       if (!context)
+               return -ENOMEM;
+       spin_lock_irqsave(&iommu->lock, flags);
+       if (context_present(context)) {
+               spin_unlock_irqrestore(&iommu->lock, flags);
+               return 0;
+       }
+
+       id = domain->id;
+       pgd = domain->pgd;
+
+       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+           domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
+               int found = 0;
+
+               /* find an available domain id for this device in iommu */
+               ndomains = cap_ndoms(iommu->cap);
+               for_each_set_bit(num, iommu->domain_ids, ndomains) {
+                       if (iommu->domains[num] == domain) {
+                               id = num;
+                               found = 1;
+                               break;
+                       }
+               }
+
+               if (found == 0) {
+                       num = find_first_zero_bit(iommu->domain_ids, ndomains);
+                       if (num >= ndomains) {
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               printk(KERN_ERR "IOMMU: no free domain ids\n");
+                               return -EFAULT;
+                       }
+
+                       set_bit(num, iommu->domain_ids);
+                       iommu->domains[num] = domain;
+                       id = num;
+               }
+
+               /* Skip top levels of page tables for
+                * iommus which have a smaller agaw than the default.
+                * Unnecessary for PT mode.
+                */
+               if (translation != CONTEXT_TT_PASS_THROUGH) {
+                       for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+                               pgd = phys_to_virt(dma_pte_addr(pgd));
+                               if (!dma_pte_present(pgd)) {
+                                       spin_unlock_irqrestore(&iommu->lock, flags);
+                                       return -ENOMEM;
+                               }
+                       }
+               }
+       }
+
+       context_set_domain_id(context, id);
+
+       if (translation != CONTEXT_TT_PASS_THROUGH) {
+               info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
+               translation = info ? CONTEXT_TT_DEV_IOTLB :
+                                    CONTEXT_TT_MULTI_LEVEL;
+       }
+       /*
+        * In pass through mode, AW must be programmed to indicate the largest
+        * AGAW value supported by hardware. And ASR is ignored by hardware.
+        */
+       if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
+               context_set_address_width(context, iommu->msagaw);
+       else {
+               context_set_address_root(context, virt_to_phys(pgd));
+               context_set_address_width(context, iommu->agaw);
+       }
+
+       context_set_translation_type(context, translation);
+       context_set_fault_enable(context);
+       context_set_present(context);
+       domain_flush_cache(domain, context, sizeof(*context));
+
+       /*
+        * It's a non-present to present mapping. If hardware doesn't cache
+        * non-present entries we only need to flush the write-buffer. If it
+        * _does_ cache non-present entries, then it does so in the special
+        * domain #0, which we have to flush:
+        */
+       if (cap_caching_mode(iommu->cap)) {
+               iommu->flush.flush_context(iommu, 0,
+                                          (((u16)bus) << 8) | devfn,
+                                          DMA_CCMD_MASK_NOBIT,
+                                          DMA_CCMD_DEVICE_INVL);
+               iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
+       } else {
+               iommu_flush_write_buffer(iommu);
+       }
+       iommu_enable_dev_iotlb(info);
+       spin_unlock_irqrestore(&iommu->lock, flags);
+
+       spin_lock_irqsave(&domain->iommu_lock, flags);
+       if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
+               domain->iommu_count++;
+               if (domain->iommu_count == 1)
+                       domain->nid = iommu->node;
+               domain_update_iommu_cap(domain);
+       }
+       spin_unlock_irqrestore(&domain->iommu_lock, flags);
+       return 0;
+}
+
+static int
+domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
+                       int translation)
+{
+       int ret;
+       struct pci_dev *tmp, *parent;
+
+       ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
+                                        pdev->bus->number, pdev->devfn,
+                                        translation);
+       if (ret)
+               return ret;
+
+       /* dependent device mapping */
+       tmp = pci_find_upstream_pcie_bridge(pdev);
+       if (!tmp)
+               return 0;
+       /* Secondary interface's bus number and devfn 0 */
+       parent = pdev->bus->self;
+       while (parent != tmp) {
+               ret = domain_context_mapping_one(domain,
+                                                pci_domain_nr(parent->bus),
+                                                parent->bus->number,
+                                                parent->devfn, translation);
+               if (ret)
+                       return ret;
+               parent = parent->bus->self;
+       }
+       if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
+               return domain_context_mapping_one(domain,
+                                       pci_domain_nr(tmp->subordinate),
+                                       tmp->subordinate->number, 0,
+                                       translation);
+       else /* this is a legacy PCI bridge */
+               return domain_context_mapping_one(domain,
+                                                 pci_domain_nr(tmp->bus),
+                                                 tmp->bus->number,
+                                                 tmp->devfn,
+                                                 translation);
+}
+
+static int domain_context_mapped(struct pci_dev *pdev)
+{
+       int ret;
+       struct pci_dev *tmp, *parent;
+       struct intel_iommu *iommu;
+
+       iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+                               pdev->devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
+       if (!ret)
+               return ret;
+       /* dependent device mapping */
+       tmp = pci_find_upstream_pcie_bridge(pdev);
+       if (!tmp)
+               return ret;
+       /* Secondary interface's bus number and devfn 0 */
+       parent = pdev->bus->self;
+       while (parent != tmp) {
+               ret = device_context_mapped(iommu, parent->bus->number,
+                                           parent->devfn);
+               if (!ret)
+                       return ret;
+               parent = parent->bus->self;
+       }
+       if (pci_is_pcie(tmp))
+               return device_context_mapped(iommu, tmp->subordinate->number,
+                                            0);
+       else
+               return device_context_mapped(iommu, tmp->bus->number,
+                                            tmp->devfn);
+}
+
+/* Returns the number of VTD pages, aligned to the MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+                                           size_t size)
+{
+       host_addr &= ~PAGE_MASK;
+       return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
+/* Return largest possible superpage level for a given mapping */
+static inline int hardware_largepage_caps(struct dmar_domain *domain,
+                                         unsigned long iov_pfn,
+                                         unsigned long phy_pfn,
+                                         unsigned long pages)
+{
+       int support, level = 1;
+       unsigned long pfnmerge;
+
+       support = domain->iommu_superpage;
+
+       /* To use a large page, the virtual *and* physical addresses
+          must be aligned to 2MiB/1GiB/etc. Lower bits set in either
+          of them will mean we have to use smaller pages. So just
+          merge them and check both at once. */
+       pfnmerge = iov_pfn | phy_pfn;
+
+       while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
+               pages >>= VTD_STRIDE_SHIFT;
+               if (!pages)
+                       break;
+               pfnmerge >>= VTD_STRIDE_SHIFT;
+               level++;
+               support--;
+       }
+       return level;
+}
+
+static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+                           struct scatterlist *sg, unsigned long phys_pfn,
+                           unsigned long nr_pages, int prot)
+{
+       struct dma_pte *first_pte = NULL, *pte = NULL;
+       phys_addr_t uninitialized_var(pteval);
+       int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+       unsigned long sg_res;
+       unsigned int largepage_lvl = 0;
+       unsigned long lvl_pages = 0;
+
+       BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
+
+       if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
+               return -EINVAL;
+
+       prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+
+       if (sg)
+               sg_res = 0;
+       else {
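+               /* no scatterlist: map one contiguous range starting at phys_pfn */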
+               sg_res = nr_pages + 1;
+               pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+       }
+
+       while (nr_pages > 0) {
+               uint64_t tmp;
+
+               if (!sg_res) {
+                       sg_res = aligned_nrpages(sg->offset, sg->length);
+                       sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+                       sg->dma_length = sg->length;
+                       pteval = page_to_phys(sg_page(sg)) | prot;
+                       phys_pfn = pteval >> VTD_PAGE_SHIFT;
+               }
+
+               if (!pte) {
+                       largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
+
+                       first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
+                       if (!pte)
+                               return -ENOMEM;
+                       /* It is a large page */
+                       if (largepage_lvl > 1)
+                               pteval |= DMA_PTE_LARGE_PAGE;
+                       else
+                               pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
+
+               }
+               /* We don't need lock here, nobody else
+                * touches the iova range
+                */
+               tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
+               if (tmp) {
+                       static int dumps = 5;
+                       printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
+                              iov_pfn, tmp, (unsigned long long)pteval);
+                       if (dumps) {
+                               dumps--;
+                               debug_dma_dump_mappings(NULL);
+                       }
+                       WARN_ON(1);
+               }
+
+               lvl_pages = lvl_to_nr_pages(largepage_lvl);
+
+               BUG_ON(nr_pages < lvl_pages);
+               BUG_ON(sg_res < lvl_pages);
+
+               nr_pages -= lvl_pages;
+               iov_pfn += lvl_pages;
+               phys_pfn += lvl_pages;
+               pteval += lvl_pages * VTD_PAGE_SIZE;
+               sg_res -= lvl_pages;
+
+               /* If the next PTE would be the first in a new page, then we
+                  need to flush the cache on the entries we've just written.
+                  And then we'll need to recalculate 'pte', so clear it and
+                  let it get set again in the if (!pte) block above.
+
+                  If we're done (!nr_pages) we need to flush the cache too.
+
+                  Also if we've been setting superpages, we may need to
+                  recalculate 'pte' and switch back to smaller pages for the
+                  end of the mapping, if the trailing size is not enough to
+                  use another superpage (i.e. sg_res < lvl_pages). */
+               pte++;
+               if (!nr_pages || first_pte_in_page(pte) ||
+                   (largepage_lvl > 1 && sg_res < lvl_pages)) {
+                       domain_flush_cache(domain, first_pte,
+                                          (void *)pte - (void *)first_pte);
+                       pte = NULL;
+               }
+
+               if (!sg_res && nr_pages)
+                       sg = sg_next(sg);
+       }
+       return 0;
+}
+
+static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+                                   struct scatterlist *sg, unsigned long nr_pages,
+                                   int prot)
+{
+       return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+}
+
+static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+                                    unsigned long phys_pfn, unsigned long nr_pages,
+                                    int prot)
+{
+       return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+}
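+
+/*
+ * The two helpers above are thin wrappers around __domain_mapping():
+ * domain_sg_mapping() walks a scatterlist (the physical pages come from
+ * the sg entries), while domain_pfn_mapping() maps a physically
+ * contiguous range -- e.g. the 1:1 identity/RMRR mappings below call
+ * domain_pfn_mapping(domain, pfn, pfn, npages, prot).
+ */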
+
+static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+       if (!iommu)
+               return;
+
+       clear_context_table(iommu, bus, devfn);
+       iommu->flush.flush_context(iommu, 0, 0, 0,
+                                          DMA_CCMD_GLOBAL_INVL);
+       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+}
+
+static void domain_remove_dev_info(struct dmar_domain *domain)
+{
+       struct device_domain_info *info;
+       unsigned long flags;
+       struct intel_iommu *iommu;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       while (!list_empty(&domain->devices)) {
+               info = list_entry(domain->devices.next,
+                       struct device_domain_info, link);
+               list_del(&info->link);
+               list_del(&info->global);
+               if (info->dev)
+                       info->dev->dev.archdata.iommu = NULL;
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+
+               iommu_disable_dev_iotlb(info);
+               iommu = device_to_iommu(info->segment, info->bus, info->devfn);
+               iommu_detach_dev(iommu, info->bus, info->devfn);
+               free_devinfo_mem(info);
+
+               spin_lock_irqsave(&device_domain_lock, flags);
+       }
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+/*
+ * find_domain
+ * Note: we use struct pci_dev->dev.archdata.iommu to store the info
+ */
+static struct dmar_domain *
+find_domain(struct pci_dev *pdev)
+{
+       struct device_domain_info *info;
+
+       /* No lock here, assumes no domain exit in normal case */
+       info = pdev->dev.archdata.iommu;
+       if (info)
+               return info->domain;
+       return NULL;
+}
+
+/* find an existing domain for the device, or allocate and initialize one */
+static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
+{
+       struct dmar_domain *domain, *found = NULL;
+       struct intel_iommu *iommu;
+       struct dmar_drhd_unit *drhd;
+       struct device_domain_info *info, *tmp;
+       struct pci_dev *dev_tmp;
+       unsigned long flags;
+       int bus = 0, devfn = 0;
+       int segment;
+       int ret;
+
+       domain = find_domain(pdev);
+       if (domain)
+               return domain;
+
+       segment = pci_domain_nr(pdev->bus);
+
+       dev_tmp = pci_find_upstream_pcie_bridge(pdev);
+       if (dev_tmp) {
+               if (pci_is_pcie(dev_tmp)) {
+                       bus = dev_tmp->subordinate->number;
+                       devfn = 0;
+               } else {
+                       bus = dev_tmp->bus->number;
+                       devfn = dev_tmp->devfn;
+               }
+               spin_lock_irqsave(&device_domain_lock, flags);
+               list_for_each_entry(info, &device_domain_list, global) {
+                       if (info->segment == segment &&
+                           info->bus == bus && info->devfn == devfn) {
+                               found = info->domain;
+                               break;
+                       }
+               }
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               /* the PCIe-to-PCI bridge already has a domain, use it */
+               if (found) {
+                       domain = found;
+                       goto found_domain;
+               }
+       }
+
+       domain = alloc_domain();
+       if (!domain)
+               goto error;
+
+       /* Allocate new domain for the device */
+       drhd = dmar_find_matched_drhd_unit(pdev);
+       if (!drhd) {
+               printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
+                       pci_name(pdev));
+               return NULL;
+       }
+       iommu = drhd->iommu;
+
+       ret = iommu_attach_domain(domain, iommu);
+       if (ret) {
+               free_domain_mem(domain);
+               goto error;
+       }
+
+       if (domain_init(domain, gaw)) {
+               domain_exit(domain);
+               goto error;
+       }
+
+       /* register pcie-to-pci device */
+       if (dev_tmp) {
+               info = alloc_devinfo_mem();
+               if (!info) {
+                       domain_exit(domain);
+                       goto error;
+               }
+               info->segment = segment;
+               info->bus = bus;
+               info->devfn = devfn;
+               info->dev = NULL;
+               info->domain = domain;
+               /* This domain is shared by devices under p2p bridge */
+               domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
+
+               /* the PCIe-to-PCI bridge already has a domain, use it */
+               found = NULL;
+               spin_lock_irqsave(&device_domain_lock, flags);
+               list_for_each_entry(tmp, &device_domain_list, global) {
+                       if (tmp->segment == segment &&
+                           tmp->bus == bus && tmp->devfn == devfn) {
+                               found = tmp->domain;
+                               break;
+                       }
+               }
+               if (found) {
+                       spin_unlock_irqrestore(&device_domain_lock, flags);
+                       free_devinfo_mem(info);
+                       domain_exit(domain);
+                       domain = found;
+               } else {
+                       list_add(&info->link, &domain->devices);
+                       list_add(&info->global, &device_domain_list);
+                       spin_unlock_irqrestore(&device_domain_lock, flags);
+               }
+       }
+
+found_domain:
+       info = alloc_devinfo_mem();
+       if (!info)
+               goto error;
+       info->segment = segment;
+       info->bus = pdev->bus->number;
+       info->devfn = pdev->devfn;
+       info->dev = pdev;
+       info->domain = domain;
+       spin_lock_irqsave(&device_domain_lock, flags);
+       /* somebody else may have beaten us to it */
+       found = find_domain(pdev);
+       if (found != NULL) {
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               if (found != domain) {
+                       domain_exit(domain);
+                       domain = found;
+               }
+               free_devinfo_mem(info);
+               return domain;
+       }
+       list_add(&info->link, &domain->devices);
+       list_add(&info->global, &device_domain_list);
+       pdev->dev.archdata.iommu = info;
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+       return domain;
+error:
+       /* recheck it here, maybe others set it */
+       return find_domain(pdev);
+}
+
+static int iommu_identity_mapping;
+#define IDENTMAP_ALL           1
+#define IDENTMAP_GFX           2
+#define IDENTMAP_AZALIA                4
+
+static int iommu_domain_identity_map(struct dmar_domain *domain,
+                                    unsigned long long start,
+                                    unsigned long long end)
+{
+       unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
+       unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
+
+       if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
+                         dma_to_mm_pfn(last_vpfn))) {
+               printk(KERN_ERR "IOMMU: reserve iova failed\n");
+               return -ENOMEM;
+       }
+
+       pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
+                start, end, domain->id);
+       /*
+        * RMRR range might have overlap with physical memory range,
+        * clear it first
+        */
+       dma_pte_clear_range(domain, first_vpfn, last_vpfn);
+
+       return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
+                                 last_vpfn - first_vpfn + 1,
+                                 DMA_PTE_READ|DMA_PTE_WRITE);
+}
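+
+/*
+ * Illustrative example: an RMRR covering 0xcf000000..0xcf01ffff becomes
+ * first_vpfn = 0xcf000, last_vpfn = 0xcf01f (4KiB VT-d pages); the iova
+ * range is reserved, any stale PTEs are cleared, and the region is then
+ * mapped 1:1 (vpfn == pfn) with read/write permission.
+ */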
+
+static int iommu_prepare_identity_map(struct pci_dev *pdev,
+                                     unsigned long long start,
+                                     unsigned long long end)
+{
+       struct dmar_domain *domain;
+       int ret;
+
+       domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+       if (!domain)
+               return -ENOMEM;
+
+       /* For _hardware_ passthrough, don't bother. But for software
+          passthrough, we do it anyway -- it may indicate a memory
+          range which is reserved in E820 and therefore didn't get set
+          up to start with in si_domain */
+       if (domain == si_domain && hw_pass_through) {
+               printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+                      pci_name(pdev), start, end);
+               return 0;
+       }
+
+       printk(KERN_INFO
+              "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+              pci_name(pdev), start, end);
+
+       if (end < start) {
+               WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
+                       "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                       dmi_get_system_info(DMI_BIOS_VENDOR),
+                       dmi_get_system_info(DMI_BIOS_VERSION),
+                    dmi_get_system_info(DMI_PRODUCT_VERSION));
+               ret = -EIO;
+               goto error;
+       }
+
+       if (end >> agaw_to_width(domain->agaw)) {
+               WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
+                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                    agaw_to_width(domain->agaw),
+                    dmi_get_system_info(DMI_BIOS_VENDOR),
+                    dmi_get_system_info(DMI_BIOS_VERSION),
+                    dmi_get_system_info(DMI_PRODUCT_VERSION));
+               ret = -EIO;
+               goto error;
+       }
+
+       ret = iommu_domain_identity_map(domain, start, end);
+       if (ret)
+               goto error;
+
+       /* context entry init */
+       ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+       if (ret)
+               goto error;
+
+       return 0;
+
+ error:
+       domain_exit(domain);
+       return ret;
+}
+
+static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
+       struct pci_dev *pdev)
+{
+       if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+               return 0;
+       return iommu_prepare_identity_map(pdev, rmrr->base_address,
+               rmrr->end_address);
+}
+
+#ifdef CONFIG_DMAR_FLOPPY_WA
+static inline void iommu_prepare_isa(void)
+{
+       struct pci_dev *pdev;
+       int ret;
+
+       pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+       if (!pdev)
+               return;
+
+       printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
+       ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
+
+       if (ret)
+               printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
+                      "floppy might not work\n");
+
+}
+#else
+static inline void iommu_prepare_isa(void)
+{
+       return;
+}
+#endif /* !CONFIG_DMAR_FLOPPY_WA */
+
+static int md_domain_init(struct dmar_domain *domain, int guest_width);
+
+static int __init si_domain_work_fn(unsigned long start_pfn,
+                                   unsigned long end_pfn, void *datax)
+{
+       int *ret = datax;
+
+       *ret = iommu_domain_identity_map(si_domain,
+                                        (uint64_t)start_pfn << PAGE_SHIFT,
+                                        (uint64_t)end_pfn << PAGE_SHIFT);
+       return *ret;
+
+}
+
+static int __init si_domain_init(int hw)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       int nid, ret = 0;
+
+       si_domain = alloc_domain();
+       if (!si_domain)
+               return -EFAULT;
+
+       pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
+
+       for_each_active_iommu(iommu, drhd) {
+               ret = iommu_attach_domain(si_domain, iommu);
+               if (ret) {
+                       domain_exit(si_domain);
+                       return -EFAULT;
+               }
+       }
+
+       if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+               domain_exit(si_domain);
+               return -EFAULT;
+       }
+
+       si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+
+       if (hw)
+               return 0;
+
+       for_each_online_node(nid) {
+               work_with_active_regions(nid, si_domain_work_fn, &ret);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void domain_remove_one_dev_info(struct dmar_domain *domain,
+                                         struct pci_dev *pdev);
+static int identity_mapping(struct pci_dev *pdev)
+{
+       struct device_domain_info *info;
+
+       if (likely(!iommu_identity_mapping))
+               return 0;
+
+       info = pdev->dev.archdata.iommu;
+       if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
+               return (info->domain == si_domain);
+
+       return 0;
+}
+
+static int domain_add_dev_info(struct dmar_domain *domain,
+                              struct pci_dev *pdev,
+                              int translation)
+{
+       struct device_domain_info *info;
+       unsigned long flags;
+       int ret;
+
+       info = alloc_devinfo_mem();
+       if (!info)
+               return -ENOMEM;
+
+       ret = domain_context_mapping(domain, pdev, translation);
+       if (ret) {
+               free_devinfo_mem(info);
+               return ret;
+       }
+
+       info->segment = pci_domain_nr(pdev->bus);
+       info->bus = pdev->bus->number;
+       info->devfn = pdev->devfn;
+       info->dev = pdev;
+       info->domain = domain;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_add(&info->link, &domain->devices);
+       list_add(&info->global, &device_domain_list);
+       pdev->dev.archdata.iommu = info;
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       return 0;
+}
+
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+       if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+               return 1;
+
+       if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
+               return 1;
+
+       if (!(iommu_identity_mapping & IDENTMAP_ALL))
+               return 0;
+
+       /*
+        * We want to start off with all devices in the 1:1 domain, and
+        * take them out later if we find they can't access all of memory.
+        *
+        * However, we can't do this for PCI devices behind bridges,
+        * because all PCI devices behind the same bridge will end up
+        * with the same source-id on their transactions.
+        *
+        * Practically speaking, we can't change things around for these
+        * devices at run-time, because we can't be sure there'll be no
+        * DMA transactions in flight for any of their siblings.
+        * 
+        * So PCI devices (unless they're on the root bus) as well as
+        * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+        * the 1:1 domain, just in _case_ one of their siblings turns out
+        * not to be able to map all of memory.
+        */
+       if (!pci_is_pcie(pdev)) {
+               if (!pci_is_root_bus(pdev->bus))
+                       return 0;
+               if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+                       return 0;
+       } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+               return 0;
+
+       /* 
+        * At boot time, we don't yet know if devices will be 64-bit capable.
+        * Assume that they will -- if they turn out not to be, then we can 
+        * take them out of the 1:1 domain later.
+        */
+       if (!startup) {
+               /*
+                * If the device's dma_mask is less than the system's memory
+                * size then this is not a candidate for identity mapping.
+                */
+               u64 dma_mask = pdev->dma_mask;
+
+               if (pdev->dev.coherent_dma_mask &&
+                   pdev->dev.coherent_dma_mask < dma_mask)
+                       dma_mask = pdev->dev.coherent_dma_mask;
+
+               return dma_mask >= dma_get_required_mask(&pdev->dev);
+       }
+
+       return 1;
+}
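+
+/*
+ * Example of the runtime (!startup) check above: a device with a 32-bit
+ * DMA mask on a machine with RAM above 4GiB has
+ * dma_mask < dma_get_required_mask(), so it is not a candidate for the
+ * 1:1 domain and gets a real remapping domain instead.
+ */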
+
+static int __init iommu_prepare_static_identity_mapping(int hw)
+{
+       struct pci_dev *pdev = NULL;
+       int ret;
+
+       ret = si_domain_init(hw);
+       if (ret)
+               return -EFAULT;
+
+       for_each_pci_dev(pdev) {
+               /* Skip Host/PCI Bridge devices */
+               if (IS_BRIDGE_HOST_DEVICE(pdev))
+                       continue;
+               if (iommu_should_identity_map(pdev, 1)) {
+                       printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+                              hw ? "hardware" : "software", pci_name(pdev));
+
+                       ret = domain_add_dev_info(si_domain, pdev,
+                                                    hw ? CONTEXT_TT_PASS_THROUGH :
+                                                    CONTEXT_TT_MULTI_LEVEL);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int __init init_dmars(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct dmar_rmrr_unit *rmrr;
+       struct pci_dev *pdev;
+       struct intel_iommu *iommu;
+       int i, ret;
+
+       /*
+        * for each drhd
+        *    allocate root
+        *    initialize and program root entry to not present
+        * endfor
+        */
+       for_each_drhd_unit(drhd) {
+               g_num_of_iommus++;
+               /*
+                * lock not needed as this is only incremented in the single
+                * threaded kernel __init code path all other access are read
+                * only
+                */
+       }
+
+       g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
+                       GFP_KERNEL);
+       if (!g_iommus) {
+               printk(KERN_ERR "Allocating global iommu array failed\n");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       deferred_flush = kzalloc(g_num_of_iommus *
+               sizeof(struct deferred_flush_tables), GFP_KERNEL);
+       if (!deferred_flush) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+
+               iommu = drhd->iommu;
+               g_iommus[iommu->seq_id] = iommu;
+
+               ret = iommu_init_domains(iommu);
+               if (ret)
+                       goto error;
+
+               /*
+                * TBD:
+                * we could share the same root & context tables
+                * among all IOMMU's. Need to Split it later.
+                */
+               ret = iommu_alloc_root_entry(iommu);
+               if (ret) {
+                       printk(KERN_ERR "IOMMU: allocate root entry failed\n");
+                       goto error;
+               }
+               if (!ecap_pass_through(iommu->ecap))
+                       hw_pass_through = 0;
+       }
+
+       /*
+        * Start from a sane iommu hardware state.
+        */
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+
+               iommu = drhd->iommu;
+
+               /*
+                * If queued invalidation was already initialized by us
+                * (for example, while enabling interrupt-remapping) then
+                * things are already rolling from a sane state.
+                */
+               if (iommu->qi)
+                       continue;
+
+               /*
+                * Clear any previous faults.
+                */
+               dmar_fault(-1, iommu);
+               /*
+                * Disable queued invalidation if supported and already enabled
+                * before OS handover.
+                */
+               dmar_disable_qi(iommu);
+       }
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+
+               iommu = drhd->iommu;
+
+               if (dmar_enable_qi(iommu)) {
+                       /*
+                        * Queued Invalidate not enabled, use Register Based
+                        * Invalidate
+                        */
+                       iommu->flush.flush_context = __iommu_flush_context;
+                       iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+                       printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
+                              "invalidation\n",
+                               iommu->seq_id,
+                              (unsigned long long)drhd->reg_base_addr);
+               } else {
+                       iommu->flush.flush_context = qi_flush_context;
+                       iommu->flush.flush_iotlb = qi_flush_iotlb;
+                       printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
+                              "invalidation\n",
+                               iommu->seq_id,
+                              (unsigned long long)drhd->reg_base_addr);
+               }
+       }
+
+       if (iommu_pass_through)
+               iommu_identity_mapping |= IDENTMAP_ALL;
+
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+       iommu_identity_mapping |= IDENTMAP_GFX;
+#endif
+
+       check_tylersburg_isoch();
+
+       /*
+        * If pass through is not set or not enabled, set up context entries
+        * for identity mappings for rmrr, gfx and isa, falling back to a
+        * static identity mapping if iommu_identity_mapping is set.
+        */
+       if (iommu_identity_mapping) {
+               ret = iommu_prepare_static_identity_mapping(hw_pass_through);
+               if (ret) {
+                       printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+                       goto error;
+               }
+       }
+       /*
+        * For each rmrr
+        *   for each dev attached to rmrr
+        *   do
+        *     locate drhd for dev, alloc domain for dev
+        *     allocate free domain
+        *     allocate page table entries for rmrr
+        *     if context not allocated for bus
+        *           allocate and init context
+        *           set present in root table for this bus
+        *     init context with domain, translation etc
+        *    endfor
+        * endfor
+        */
+       printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+       for_each_rmrr_units(rmrr) {
+               for (i = 0; i < rmrr->devices_cnt; i++) {
+                       pdev = rmrr->devices[i];
+                       /*
+                        * some BIOSes list non-existent devices in the
+                        * DMAR table.
+                        */
+                       if (!pdev)
+                               continue;
+                       ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+                       if (ret)
+                               printk(KERN_ERR
+                                      "IOMMU: mapping reserved region failed\n");
+               }
+       }
+
+       iommu_prepare_isa();
+
+       /*
+        * for each drhd
+        *   enable fault log
+        *   global invalidate context cache
+        *   global invalidate iotlb
+        *   enable translation
+        */
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored) {
+                       /*
+                        * we always have to disable PMRs or DMA may fail on
+                        * this device
+                        */
+                       if (force_on)
+                               iommu_disable_protect_mem_regions(drhd->iommu);
+                       continue;
+               }
+               iommu = drhd->iommu;
+
+               iommu_flush_write_buffer(iommu);
+
+               ret = dmar_set_interrupt(iommu);
+               if (ret)
+                       goto error;
+
+               iommu_set_root_entry(iommu);
+
+               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+
+               ret = iommu_enable_translation(iommu);
+               if (ret)
+                       goto error;
+
+               iommu_disable_protect_mem_regions(iommu);
+       }
+
+       return 0;
+error:
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+               iommu = drhd->iommu;
+               free_iommu(iommu);
+       }
+       kfree(g_iommus);
+       return ret;
+}
+
+/* This takes a number of _MM_ pages, not VTD pages */
+static struct iova *intel_alloc_iova(struct device *dev,
+                                    struct dmar_domain *domain,
+                                    unsigned long nrpages, uint64_t dma_mask)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct iova *iova = NULL;
+
+       /* Restrict dma_mask to the width that the iommu can handle */
+       dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+
+       if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
+               /*
+                * First try to allocate an io virtual address in
+                * DMA_BIT_MASK(32) and if that fails then try allocating
+                * from higher range
+                */
+               iova = alloc_iova(&domain->iovad, nrpages,
+                                 IOVA_PFN(DMA_BIT_MASK(32)), 1);
+               if (iova)
+                       return iova;
+       }
+       iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
+       if (unlikely(!iova)) {
+               printk(KERN_ERR "Allocating %ld-page iova for %s failed",
+                      nrpages, pci_name(pdev));
+               return NULL;
+       }
+
+       return iova;
+}
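+
+/*
+ * Note on the allocation policy above: unless the user forced DAC
+ * addressing (dmar_forcedac), a 64-bit capable device still gets its iova
+ * from below 4GiB first, and only falls back to the full mask when the
+ * 32-bit space is exhausted.
+ */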
+
+static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
+{
+       struct dmar_domain *domain;
+       int ret;
+
+       domain = get_domain_for_dev(pdev,
+                       DEFAULT_DOMAIN_ADDRESS_WIDTH);
+       if (!domain) {
+               printk(KERN_ERR
+                       "Allocating domain for %s failed", pci_name(pdev));
+               return NULL;
+       }
+
+       /* make sure context mapping is ok */
+       if (unlikely(!domain_context_mapped(pdev))) {
+               ret = domain_context_mapping(domain, pdev,
+                                            CONTEXT_TT_MULTI_LEVEL);
+               if (ret) {
+                       printk(KERN_ERR
+                               "Domain context map for %s failed",
+                               pci_name(pdev));
+                       return NULL;
+               }
+       }
+
+       return domain;
+}
+
+static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+{
+       struct device_domain_info *info;
+
+       /* No lock here, assumes no domain exit in normal case */
+       info = dev->dev.archdata.iommu;
+       if (likely(info))
+               return info->domain;
+
+       return __get_valid_domain_for_dev(dev);
+}
+
+static int iommu_dummy(struct pci_dev *pdev)
+{
+       return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+}
+
+/* Check if the pdev needs to go through the non-identity map and unmap process. */
+static int iommu_no_mapping(struct device *dev)
+{
+       struct pci_dev *pdev;
+       int found;
+
+       if (unlikely(dev->bus != &pci_bus_type))
+               return 1;
+
+       pdev = to_pci_dev(dev);
+       if (iommu_dummy(pdev))
+               return 1;
+
+       if (!iommu_identity_mapping)
+               return 0;
+
+       found = identity_mapping(pdev);
+       if (found) {
+               if (iommu_should_identity_map(pdev, 0))
+                       return 1;
+               else {
+                       /*
+                        * The 32 bit DMA device is removed from si_domain
+                        * and falls back to non-identity mapping.
+                        */
+                       domain_remove_one_dev_info(si_domain, pdev);
+                       printk(KERN_INFO "32bit %s uses non-identity mapping\n",
+                              pci_name(pdev));
+                       return 0;
+               }
+       } else {
+               /*
+                * When a 64 bit DMA capable device is detached from a VM,
+                * the device is put back into si_domain for identity mapping.
+                */
+               if (iommu_should_identity_map(pdev, 0)) {
+                       int ret;
+                       ret = domain_add_dev_info(si_domain, pdev,
+                                                 hw_pass_through ?
+                                                 CONTEXT_TT_PASS_THROUGH :
+                                                 CONTEXT_TT_MULTI_LEVEL);
+                       if (!ret) {
+                               printk(KERN_INFO "64bit %s uses identity mapping\n",
+                                      pci_name(pdev));
+                               return 1;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+                                    size_t size, int dir, u64 dma_mask)
+{
+       struct pci_dev *pdev = to_pci_dev(hwdev);
+       struct dmar_domain *domain;
+       phys_addr_t start_paddr;
+       struct iova *iova;
+       int prot = 0;
+       int ret;
+       struct intel_iommu *iommu;
+       unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
+
+       BUG_ON(dir == DMA_NONE);
+
+       if (iommu_no_mapping(hwdev))
+               return paddr;
+
+       domain = get_valid_domain_for_dev(pdev);
+       if (!domain)
+               return 0;
+
+       iommu = domain_get_iommu(domain);
+       size = aligned_nrpages(paddr, size);
+
+       iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
+       if (!iova)
+               goto error;
+
+       /*
+        * Check if DMAR supports zero-length reads on write only
+        * mappings..
+        */
+       if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
+                       !cap_zlr(iommu->cap))
+               prot |= DMA_PTE_READ;
+       if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+               prot |= DMA_PTE_WRITE;
+       /*
+        * paddr to (paddr + size) might span a partial page, so we should map
+        * the whole page.  Note: if two parts of one page are separately
+        * mapped, we might have two guest_addrs mapping to the same host
+        * paddr, but this is not a big problem
+        */
+       ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
+                                mm_to_dma_pfn(paddr_pfn), size, prot);
+       if (ret)
+               goto error;
+
+       /* it's a non-present to present mapping. Only flush if caching mode */
+       if (cap_caching_mode(iommu->cap))
+               iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
+       else
+               iommu_flush_write_buffer(iommu);
+
+       start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
+       start_paddr += paddr & ~PAGE_MASK;
+       return start_paddr;
+
+error:
+       if (iova)
+               __free_iova(&domain->iovad, iova);
+       printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
+               pci_name(pdev), size, (unsigned long long)paddr, dir);
+       return 0;
+}
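+
+/*
+ * Illustrative example of the return value above: mapping
+ * paddr = 0x12345678, size = 0x100 needs one iova page, maps the whole
+ * 4KiB page containing paddr and returns
+ * (iova->pfn_lo << PAGE_SHIFT) + 0x678, so the caller keeps the same
+ * offset within the page.
+ */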
+
+static dma_addr_t intel_map_page(struct device *dev, struct page *page,
+                                unsigned long offset, size_t size,
+                                enum dma_data_direction dir,
+                                struct dma_attrs *attrs)
+{
+       return __intel_map_single(dev, page_to_phys(page) + offset, size,
+                                 dir, to_pci_dev(dev)->dma_mask);
+}
+
+static void flush_unmaps(void)
+{
+       int i, j;
+
+       timer_on = 0;
+
+       /* just flush them all */
+       for (i = 0; i < g_num_of_iommus; i++) {
+               struct intel_iommu *iommu = g_iommus[i];
+               if (!iommu)
+                       continue;
+
+               if (!deferred_flush[i].next)
+                       continue;
+
+               /* In caching mode, global flushes make emulation expensive */
+               if (!cap_caching_mode(iommu->cap))
+                       iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+                                        DMA_TLB_GLOBAL_FLUSH);
+               for (j = 0; j < deferred_flush[i].next; j++) {
+                       unsigned long mask;
+                       struct iova *iova = deferred_flush[i].iova[j];
+                       struct dmar_domain *domain = deferred_flush[i].domain[j];
+
+                       /* On real hardware multiple invalidations are expensive */
+                       if (cap_caching_mode(iommu->cap))
+                               iommu_flush_iotlb_psi(iommu, domain->id,
+                               iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
+                       else {
+                               mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
+                               iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+                                               (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+                       }
+                       __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
+               }
+               deferred_flush[i].next = 0;
+       }
+
+       list_size = 0;
+}
+
+static void flush_unmaps_timeout(unsigned long data)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&async_umap_flush_lock, flags);
+       flush_unmaps();
+       spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+}
+
+static void add_unmap(struct dmar_domain *dom, struct iova *iova)
+{
+       unsigned long flags;
+       int next, iommu_id;
+       struct intel_iommu *iommu;
+
+       spin_lock_irqsave(&async_umap_flush_lock, flags);
+       if (list_size == HIGH_WATER_MARK)
+               flush_unmaps();
+
+       iommu = domain_get_iommu(dom);
+       iommu_id = iommu->seq_id;
+
+       next = deferred_flush[iommu_id].next;
+       deferred_flush[iommu_id].domain[next] = dom;
+       deferred_flush[iommu_id].iova[next] = iova;
+       deferred_flush[iommu_id].next++;
+
+       if (!timer_on) {
+               mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
+               timer_on = 1;
+       }
+       list_size++;
+       spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+}
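+
+/*
+ * The deferred-unmap path above batches iova frees per IOMMU: entries
+ * accumulate in deferred_flush[] and are flushed either once
+ * HIGH_WATER_MARK entries have queued up or when the 10ms unmap_timer
+ * fires, trading a little address-space reuse latency for far fewer
+ * IOTLB invalidations (see intel_unmap_page()/intel_unmap_sg()).
+ */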
+
+static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
+                            size_t size, enum dma_data_direction dir,
+                            struct dma_attrs *attrs)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct dmar_domain *domain;
+       unsigned long start_pfn, last_pfn;
+       struct iova *iova;
+       struct intel_iommu *iommu;
+
+       if (iommu_no_mapping(dev))
+               return;
+
+       domain = find_domain(pdev);
+       BUG_ON(!domain);
+
+       iommu = domain_get_iommu(domain);
+
+       iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
+       if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
+                     (unsigned long long)dev_addr))
+               return;
+
+       start_pfn = mm_to_dma_pfn(iova->pfn_lo);
+       last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
+
+       pr_debug("Device %s unmapping: pfn %lx-%lx\n",
+                pci_name(pdev), start_pfn, last_pfn);
+
+       /*  clear the whole page */
+       dma_pte_clear_range(domain, start_pfn, last_pfn);
+
+       /* free page tables */
+       dma_pte_free_pagetable(domain, start_pfn, last_pfn);
+
+       if (intel_iommu_strict) {
+               iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+                                     last_pfn - start_pfn + 1, 0);
+               /* free iova */
+               __free_iova(&domain->iovad, iova);
+       } else {
+               add_unmap(domain, iova);
+               /*
+                * queue up the release of the unmap to save the 1/6th of the
+                * cpu used up by the iotlb flush operation...
+                */
+       }
+}
+
+static void *intel_alloc_coherent(struct device *hwdev, size_t size,
+                                 dma_addr_t *dma_handle, gfp_t flags)
+{
+       void *vaddr;
+       int order;
+
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
+
+       if (!iommu_no_mapping(hwdev))
+               flags &= ~(GFP_DMA | GFP_DMA32);
+       else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
+               if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
+                       flags |= GFP_DMA;
+               else
+                       flags |= GFP_DMA32;
+       }
+
+       vaddr = (void *)__get_free_pages(flags, order);
+       if (!vaddr)
+               return NULL;
+       memset(vaddr, 0, size);
+
+       *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+                                        DMA_BIDIRECTIONAL,
+                                        hwdev->coherent_dma_mask);
+       if (*dma_handle)
+               return vaddr;
+       free_pages((unsigned long)vaddr, order);
+       return NULL;
+}
+
+static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+                               dma_addr_t dma_handle)
+{
+       int order;
+
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
+
+       intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
+       free_pages((unsigned long)vaddr, order);
+}
+
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+                          int nelems, enum dma_data_direction dir,
+                          struct dma_attrs *attrs)
+{
+       struct pci_dev *pdev = to_pci_dev(hwdev);
+       struct dmar_domain *domain;
+       unsigned long start_pfn, last_pfn;
+       struct iova *iova;
+       struct intel_iommu *iommu;
+
+       if (iommu_no_mapping(hwdev))
+               return;
+
+       domain = find_domain(pdev);
+       BUG_ON(!domain);
+
+       iommu = domain_get_iommu(domain);
+
+       iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
+       if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
+                     (unsigned long long)sglist[0].dma_address))
+               return;
+
+       start_pfn = mm_to_dma_pfn(iova->pfn_lo);
+       last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
+
+       /*  clear the whole page */
+       dma_pte_clear_range(domain, start_pfn, last_pfn);
+
+       /* free page tables */
+       dma_pte_free_pagetable(domain, start_pfn, last_pfn);
+
+       if (intel_iommu_strict) {
+               iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+                                     last_pfn - start_pfn + 1, 0);
+               /* free iova */
+               __free_iova(&domain->iovad, iova);
+       } else {
+               add_unmap(domain, iova);
+               /*
+                * queue up the release of the unmap to save the 1/6th of the
+                * cpu used up by the iotlb flush operation...
+                */
+       }
+}
+
+static int intel_nontranslate_map_sg(struct device *hddev,
+       struct scatterlist *sglist, int nelems, int dir)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nelems, i) {
+               BUG_ON(!sg_page(sg));
+               sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+               sg->dma_length = sg->length;
+       }
+       return nelems;
+}
+
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+                       enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       int i;
+       struct pci_dev *pdev = to_pci_dev(hwdev);
+       struct dmar_domain *domain;
+       size_t size = 0;
+       int prot = 0;
+       struct iova *iova = NULL;
+       int ret;
+       struct scatterlist *sg;
+       unsigned long start_vpfn;
+       struct intel_iommu *iommu;
+
+       BUG_ON(dir == DMA_NONE);
+       if (iommu_no_mapping(hwdev))
+               return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
+
+       domain = get_valid_domain_for_dev(pdev);
+       if (!domain)
+               return 0;
+
+       iommu = domain_get_iommu(domain);
+
+       for_each_sg(sglist, sg, nelems, i)
+               size += aligned_nrpages(sg->offset, sg->length);
+
+       iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+                               pdev->dma_mask);
+       if (!iova) {
+               sglist->dma_length = 0;
+               return 0;
+       }
+
+       /*
+        * Check if DMAR supports zero-length reads on write only
+        * mappings..
+        */
+       if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
+                       !cap_zlr(iommu->cap))
+               prot |= DMA_PTE_READ;
+       if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+               prot |= DMA_PTE_WRITE;
+
+       start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
+
+       ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
+       if (unlikely(ret)) {
+               /*  clear the page */
+               dma_pte_clear_range(domain, start_vpfn,
+                                   start_vpfn + size - 1);
+               /* free page tables */
+               dma_pte_free_pagetable(domain, start_vpfn,
+                                      start_vpfn + size - 1);
+               /* free iova */
+               __free_iova(&domain->iovad, iova);
+               return 0;
+       }
+
+       /* it's a non-present to present mapping. Only flush if caching mode */
+       if (cap_caching_mode(iommu->cap))
+               iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
+       else
+               iommu_flush_write_buffer(iommu);
+
+       return nelems;
+}
+
+static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return !dma_addr;
+}
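+
+/*
+ * A zero dma_addr_t is the error cookie here, since __intel_map_single()
+ * returns 0 on failure.
+ */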
+
+struct dma_map_ops intel_dma_ops = {
+       .alloc_coherent = intel_alloc_coherent,
+       .free_coherent = intel_free_coherent,
+       .map_sg = intel_map_sg,
+       .unmap_sg = intel_unmap_sg,
+       .map_page = intel_map_page,
+       .unmap_page = intel_unmap_page,
+       .mapping_error = intel_mapping_error,
+};
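+
+/*
+ * These ops are installed as the global dma_ops in intel_iommu_init()
+ * below, so ordinary dma_map_page()/dma_map_sg() calls from drivers are
+ * routed through the remapping hardware once the DMARs are initialised.
+ */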
+
+static inline int iommu_domain_cache_init(void)
+{
+       int ret = 0;
+
+       iommu_domain_cache = kmem_cache_create("iommu_domain",
+                                        sizeof(struct dmar_domain),
+                                        0,
+                                        SLAB_HWCACHE_ALIGN,
+
+                                        NULL);
+       if (!iommu_domain_cache) {
+               printk(KERN_ERR "Couldn't create iommu_domain cache\n");
+               ret = -ENOMEM;
+       }
+
+       return ret;
+}
+
+static inline int iommu_devinfo_cache_init(void)
+{
+       int ret = 0;
+
+       iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
+                                        sizeof(struct device_domain_info),
+                                        0,
+                                        SLAB_HWCACHE_ALIGN,
+                                        NULL);
+       if (!iommu_devinfo_cache) {
+               printk(KERN_ERR "Couldn't create devinfo cache\n");
+               ret = -ENOMEM;
+       }
+
+       return ret;
+}
+
+static inline int iommu_iova_cache_init(void)
+{
+       int ret = 0;
+
+       iommu_iova_cache = kmem_cache_create("iommu_iova",
+                                        sizeof(struct iova),
+                                        0,
+                                        SLAB_HWCACHE_ALIGN,
+                                        NULL);
+       if (!iommu_iova_cache) {
+               printk(KERN_ERR "Couldn't create iova cache\n");
+               ret = -ENOMEM;
+       }
+
+       return ret;
+}
+
+static int __init iommu_init_mempool(void)
+{
+       int ret;
+       ret = iommu_iova_cache_init();
+       if (ret)
+               return ret;
+
+       ret = iommu_domain_cache_init();
+       if (ret)
+               goto domain_error;
+
+       ret = iommu_devinfo_cache_init();
+       if (!ret)
+               return ret;
+
+       kmem_cache_destroy(iommu_domain_cache);
+domain_error:
+       kmem_cache_destroy(iommu_iova_cache);
+
+       return -ENOMEM;
+}
+
+static void __init iommu_exit_mempool(void)
+{
+       kmem_cache_destroy(iommu_devinfo_cache);
+       kmem_cache_destroy(iommu_domain_cache);
+       kmem_cache_destroy(iommu_iova_cache);
+
+}
+
+static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
+{
+       struct dmar_drhd_unit *drhd;
+       u32 vtbar;
+       int rc;
+
+       /* We know that this device on this chipset has its own IOMMU.
+        * If we find it under a different IOMMU, then the BIOS is lying
+        * to us. Hope that the IOMMU for this device is actually
+        * disabled, and it needs no translation...
+        */
+       rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
+       if (rc) {
+               /* "can't" happen */
+               dev_info(&pdev->dev, "failed to run vt-d quirk\n");
+               return;
+       }
+       vtbar &= 0xffff0000;
+
+       /* we know that this iommu should be at offset 0xa000 from vtbar */
+       drhd = dmar_find_matched_drhd_unit(pdev);
+       if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
+                           TAINT_FIRMWARE_WORKAROUND,
+                           "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
+               pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
+
+static void __init init_no_remapping_devices(void)
+{
+       struct dmar_drhd_unit *drhd;
+
+       for_each_drhd_unit(drhd) {
+               if (!drhd->include_all) {
+                       int i;
+                       for (i = 0; i < drhd->devices_cnt; i++)
+                               if (drhd->devices[i] != NULL)
+                                       break;
+                       /* ignore DMAR unit if no pci devices exist */
+                       if (i == drhd->devices_cnt)
+                               drhd->ignored = 1;
+               }
+       }
+
+       if (dmar_map_gfx)
+               return;
+
+       for_each_drhd_unit(drhd) {
+               int i;
+               if (drhd->ignored || drhd->include_all)
+                       continue;
+
+               for (i = 0; i < drhd->devices_cnt; i++)
+                       if (drhd->devices[i] &&
+                               !IS_GFX_DEVICE(drhd->devices[i]))
+                               break;
+
+               if (i < drhd->devices_cnt)
+                       continue;
+
+               /* bypass IOMMU if it is just for gfx devices */
+               drhd->ignored = 1;
+               for (i = 0; i < drhd->devices_cnt; i++) {
+                       if (!drhd->devices[i])
+                               continue;
+                       drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+               }
+       }
+}
+
+#ifdef CONFIG_SUSPEND
+static int init_iommu_hw(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu = NULL;
+
+       for_each_active_iommu(iommu, drhd)
+               if (iommu->qi)
+                       dmar_reenable_qi(iommu);
+
+       for_each_iommu(iommu, drhd) {
+               if (drhd->ignored) {
+                       /*
+                        * we always have to disable PMRs or DMA may fail on
+                        * this device
+                        */
+                       if (force_on)
+                               iommu_disable_protect_mem_regions(iommu);
+                       continue;
+               }
+
+               iommu_flush_write_buffer(iommu);
+
+               iommu_set_root_entry(iommu);
+
+               iommu->flush.flush_context(iommu, 0, 0, 0,
+                                          DMA_CCMD_GLOBAL_INVL);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+                                        DMA_TLB_GLOBAL_FLUSH);
+               if (iommu_enable_translation(iommu))
+                       return 1;
+               iommu_disable_protect_mem_regions(iommu);
+       }
+
+       return 0;
+}
+
+static void iommu_flush_all(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+
+       for_each_active_iommu(iommu, drhd) {
+               iommu->flush.flush_context(iommu, 0, 0, 0,
+                                          DMA_CCMD_GLOBAL_INVL);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+                                        DMA_TLB_GLOBAL_FLUSH);
+       }
+}
+
+static int iommu_suspend(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu = NULL;
+       unsigned long flag;
+
+       for_each_active_iommu(iommu, drhd) {
+               iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
+                                                GFP_ATOMIC);
+               if (!iommu->iommu_state)
+                       goto nomem;
+       }
+
+       iommu_flush_all();
+
+       for_each_active_iommu(iommu, drhd) {
+               iommu_disable_translation(iommu);
+
+               spin_lock_irqsave(&iommu->register_lock, flag);
+
+               iommu->iommu_state[SR_DMAR_FECTL_REG] =
+                       readl(iommu->reg + DMAR_FECTL_REG);
+               iommu->iommu_state[SR_DMAR_FEDATA_REG] =
+                       readl(iommu->reg + DMAR_FEDATA_REG);
+               iommu->iommu_state[SR_DMAR_FEADDR_REG] =
+                       readl(iommu->reg + DMAR_FEADDR_REG);
+               iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
+                       readl(iommu->reg + DMAR_FEUADDR_REG);
+
+               spin_unlock_irqrestore(&iommu->register_lock, flag);
+       }
+       return 0;
+
+nomem:
+       for_each_active_iommu(iommu, drhd)
+               kfree(iommu->iommu_state);
+
+       return -ENOMEM;
+}
+
+static void iommu_resume(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu = NULL;
+       unsigned long flag;
+
+       if (init_iommu_hw()) {
+               if (force_on)
+                       panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
+               else
+                       WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
+               return;
+       }
+
+       for_each_active_iommu(iommu, drhd) {
+
+               spin_lock_irqsave(&iommu->register_lock, flag);
+
+               writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
+                       iommu->reg + DMAR_FECTL_REG);
+               writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
+                       iommu->reg + DMAR_FEDATA_REG);
+               writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
+                       iommu->reg + DMAR_FEADDR_REG);
+               writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
+                       iommu->reg + DMAR_FEUADDR_REG);
+
+               spin_unlock_irqrestore(&iommu->register_lock, flag);
+       }
+
+       for_each_active_iommu(iommu, drhd)
+               kfree(iommu->iommu_state);
+}
+
+static struct syscore_ops iommu_syscore_ops = {
+       .resume         = iommu_resume,
+       .suspend        = iommu_suspend,
+};
+
+static void __init init_iommu_pm_ops(void)
+{
+       register_syscore_ops(&iommu_syscore_ops);
+}
+
+#else
+static inline void init_iommu_pm_ops(void) {}
+#endif /* CONFIG_SUSPEND */
+
+/*
+ * Here we only respond to a device being unbound from its driver.
+ *
+ * A newly added device is not attached to its DMAR domain here yet; that
+ * happens when the device is first mapped to an iova.
+ */
+static int device_notifier(struct notifier_block *nb,
+                                 unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct dmar_domain *domain;
+
+       if (iommu_no_mapping(dev))
+               return 0;
+
+       domain = find_domain(pdev);
+       if (!domain)
+               return 0;
+
+       if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
+               domain_remove_one_dev_info(domain, pdev);
+
+               if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+                   !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+                   list_empty(&domain->devices))
+                       domain_exit(domain);
+       }
+
+       return 0;
+}
+
+static struct notifier_block device_nb = {
+       .notifier_call = device_notifier,
+};
+
+int __init intel_iommu_init(void)
+{
+       int ret = 0;
+
+       /* VT-d is required for a TXT/tboot launch, so enforce that */
+       force_on = tboot_force_iommu();
+
+       if (dmar_table_init()) {
+               if (force_on)
+                       panic("tboot: Failed to initialize DMAR table\n");
+               return  -ENODEV;
+       }
+
+       if (dmar_dev_scope_init()) {
+               if (force_on)
+                       panic("tboot: Failed to initialize DMAR device scope\n");
+               return  -ENODEV;
+       }
+
+       /*
+        * Check the need for DMA-remapping initialization now.
+        * Above initialization will also be used by Interrupt-remapping.
+        */
+       if (no_iommu || dmar_disabled)
+               return -ENODEV;
+
+       if (iommu_init_mempool()) {
+               if (force_on)
+                       panic("tboot: Failed to initialize iommu memory\n");
+               return  -ENODEV;
+       }
+
+       if (dmar_init_reserved_ranges()) {
+               if (force_on)
+                       panic("tboot: Failed to reserve iommu ranges\n");
+               return  -ENODEV;
+       }
+
+       init_no_remapping_devices();
+
+       ret = init_dmars();
+       if (ret) {
+               if (force_on)
+                       panic("tboot: Failed to initialize DMARs\n");
+               printk(KERN_ERR "IOMMU: dmar init failed\n");
+               put_iova_domain(&reserved_iova_list);
+               iommu_exit_mempool();
+               return ret;
+       }
+       printk(KERN_INFO
+       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
+
+       init_timer(&unmap_timer);
+#ifdef CONFIG_SWIOTLB
+       swiotlb = 0;
+#endif
+       dma_ops = &intel_dma_ops;
+
+       init_iommu_pm_ops();
+
+       register_iommu(&intel_iommu_ops);
+
+       bus_register_notifier(&pci_bus_type, &device_nb);
+
+       return 0;
+}
+
+static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
+                                          struct pci_dev *pdev)
+{
+       struct pci_dev *tmp, *parent;
+
+       if (!iommu || !pdev)
+               return;
+
+       /* dependent device detach */
+       tmp = pci_find_upstream_pcie_bridge(pdev);
+       /* Secondary interface's bus number and devfn 0 */
+       if (tmp) {
+               parent = pdev->bus->self;
+               while (parent != tmp) {
+                       iommu_detach_dev(iommu, parent->bus->number,
+                                        parent->devfn);
+                       parent = parent->bus->self;
+               }
+               if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
+                       iommu_detach_dev(iommu,
+                               tmp->subordinate->number, 0);
+               else /* this is a legacy PCI bridge */
+                       iommu_detach_dev(iommu, tmp->bus->number,
+                                        tmp->devfn);
+       }
+}
+
+static void domain_remove_one_dev_info(struct dmar_domain *domain,
+                                         struct pci_dev *pdev)
+{
+       struct device_domain_info *info;
+       struct intel_iommu *iommu;
+       unsigned long flags;
+       int found = 0;
+       struct list_head *entry, *tmp;
+
+       iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+                               pdev->devfn);
+       if (!iommu)
+               return;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_for_each_safe(entry, tmp, &domain->devices) {
+               info = list_entry(entry, struct device_domain_info, link);
+               if (info->segment == pci_domain_nr(pdev->bus) &&
+                   info->bus == pdev->bus->number &&
+                   info->devfn == pdev->devfn) {
+                       list_del(&info->link);
+                       list_del(&info->global);
+                       if (info->dev)
+                               info->dev->dev.archdata.iommu = NULL;
+                       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+                       iommu_disable_dev_iotlb(info);
+                       iommu_detach_dev(iommu, info->bus, info->devfn);
+                       iommu_detach_dependent_devices(iommu, pdev);
+                       free_devinfo_mem(info);
+
+                       spin_lock_irqsave(&device_domain_lock, flags);
+
+                       if (found)
+                               break;
+                       else
+                               continue;
+               }
+
+               /* if there are no other devices under the same iommu
+                * owned by this domain, clear this iommu in iommu_bmp,
+                * update iommu count and coherency
+                */
+               if (iommu == device_to_iommu(info->segment, info->bus,
+                                           info->devfn))
+                       found = 1;
+       }
+
+       if (found == 0) {
+               unsigned long tmp_flags;
+               spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
+               clear_bit(iommu->seq_id, &domain->iommu_bmp);
+               domain->iommu_count--;
+               domain_update_iommu_cap(domain);
+               spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
+
+               if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+                   !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
+                       spin_lock_irqsave(&iommu->lock, tmp_flags);
+                       clear_bit(domain->id, iommu->domain_ids);
+                       iommu->domains[domain->id] = NULL;
+                       spin_unlock_irqrestore(&iommu->lock, tmp_flags);
+               }
+       }
+
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
+{
+       struct device_domain_info *info;
+       struct intel_iommu *iommu;
+       unsigned long flags1, flags2;
+
+       spin_lock_irqsave(&device_domain_lock, flags1);
+       while (!list_empty(&domain->devices)) {
+               info = list_entry(domain->devices.next,
+                       struct device_domain_info, link);
+               list_del(&info->link);
+               list_del(&info->global);
+               if (info->dev)
+                       info->dev->dev.archdata.iommu = NULL;
+
+               spin_unlock_irqrestore(&device_domain_lock, flags1);
+
+               iommu_disable_dev_iotlb(info);
+               iommu = device_to_iommu(info->segment, info->bus, info->devfn);
+               iommu_detach_dev(iommu, info->bus, info->devfn);
+               iommu_detach_dependent_devices(iommu, info->dev);
+
+               /* clear this iommu in iommu_bmp, update iommu count
+                * and capabilities
+                */
+               spin_lock_irqsave(&domain->iommu_lock, flags2);
+               if (test_and_clear_bit(iommu->seq_id,
+                                      &domain->iommu_bmp)) {
+                       domain->iommu_count--;
+                       domain_update_iommu_cap(domain);
+               }
+               spin_unlock_irqrestore(&domain->iommu_lock, flags2);
+
+               free_devinfo_mem(info);
+               spin_lock_irqsave(&device_domain_lock, flags1);
+       }
+       spin_unlock_irqrestore(&device_domain_lock, flags1);
+}
+
+/* domain id for a virtual machine; it won't be set in the context entries */
+static unsigned long vm_domid;
+
+static struct dmar_domain *iommu_alloc_vm_domain(void)
+{
+       struct dmar_domain *domain;
+
+       domain = alloc_domain_mem();
+       if (!domain)
+               return NULL;
+
+       domain->id = vm_domid++;
+       domain->nid = -1;
+       memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+       domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
+
+       return domain;
+}
+
+static int md_domain_init(struct dmar_domain *domain, int guest_width)
+{
+       int adjust_width;
+
+       init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+       spin_lock_init(&domain->iommu_lock);
+
+       domain_reserve_special_ranges(domain);
+
+       /* calculate AGAW */
+       domain->gaw = guest_width;
+       adjust_width = guestwidth_to_adjustwidth(guest_width);
+       domain->agaw = width_to_agaw(adjust_width);
+
+       INIT_LIST_HEAD(&domain->devices);
+
+       domain->iommu_count = 0;
+       domain->iommu_coherency = 0;
+       domain->iommu_snooping = 0;
+       domain->iommu_superpage = 0;
+       domain->max_addr = 0;
+       domain->nid = -1;
+
+       /* always allocate the top pgd */
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+       if (!domain->pgd)
+               return -ENOMEM;
+       domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+       return 0;
+}
+
+static void iommu_free_vm_domain(struct dmar_domain *domain)
+{
+       unsigned long flags;
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       unsigned long i;
+       unsigned long ndomains;
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+               iommu = drhd->iommu;
+
+               ndomains = cap_ndoms(iommu->cap);
+               for_each_set_bit(i, iommu->domain_ids, ndomains) {
+                       if (iommu->domains[i] == domain) {
+                               spin_lock_irqsave(&iommu->lock, flags);
+                               clear_bit(i, iommu->domain_ids);
+                               iommu->domains[i] = NULL;
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               break;
+                       }
+               }
+       }
+}
+
+static void vm_domain_exit(struct dmar_domain *domain)
+{
+       /* Domain 0 is reserved, so don't process it */
+       if (!domain)
+               return;
+
+       vm_domain_remove_all_dev_info(domain);
+       /* destroy iovas */
+       put_iova_domain(&domain->iovad);
+
+       /* clear ptes */
+       dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+
+       /* free page tables */
+       dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+
+       iommu_free_vm_domain(domain);
+       free_domain_mem(domain);
+}
+
+static int intel_iommu_domain_init(struct iommu_domain *domain)
+{
+       struct dmar_domain *dmar_domain;
+
+       dmar_domain = iommu_alloc_vm_domain();
+       if (!dmar_domain) {
+               printk(KERN_ERR
+                       "intel_iommu_domain_init: dmar_domain == NULL\n");
+               return -ENOMEM;
+       }
+       if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+               printk(KERN_ERR
+                       "intel_iommu_domain_init() failed\n");
+               vm_domain_exit(dmar_domain);
+               return -ENOMEM;
+       }
+       domain->priv = dmar_domain;
+
+       return 0;
+}
+
+static void intel_iommu_domain_destroy(struct iommu_domain *domain)
+{
+       struct dmar_domain *dmar_domain = domain->priv;
+
+       domain->priv = NULL;
+       vm_domain_exit(dmar_domain);
+}
+
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+                                    struct device *dev)
+{
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct intel_iommu *iommu;
+       int addr_width;
+
+       /* normally pdev is not mapped */
+       if (unlikely(domain_context_mapped(pdev))) {
+               struct dmar_domain *old_domain;
+
+               old_domain = find_domain(pdev);
+               if (old_domain) {
+                       if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+                           dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
+                               domain_remove_one_dev_info(old_domain, pdev);
+                       else
+                               domain_remove_dev_info(old_domain);
+               }
+       }
+
+       iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
+                               pdev->devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       /* check if this iommu agaw is sufficient for max mapped address */
+       addr_width = agaw_to_width(iommu->agaw);
+       if (addr_width > cap_mgaw(iommu->cap))
+               addr_width = cap_mgaw(iommu->cap);
+
+       if (dmar_domain->max_addr > (1LL << addr_width)) {
+               printk(KERN_ERR "%s: iommu width (%d) is not "
+                      "sufficient for the mapped address (%llx)\n",
+                      __func__, addr_width, dmar_domain->max_addr);
+               return -EFAULT;
+       }
+       dmar_domain->gaw = addr_width;
+
+       /*
+        * Knock out extra levels of page tables if necessary
+        */
+       while (iommu->agaw < dmar_domain->agaw) {
+               struct dma_pte *pte;
+
+               pte = dmar_domain->pgd;
+               if (dma_pte_present(pte)) {
+                       dmar_domain->pgd = (struct dma_pte *)
+                               phys_to_virt(dma_pte_addr(pte));
+                       free_pgtable_page(pte);
+               }
+               dmar_domain->agaw--;
+       }
+
+       return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+}
+
+static void intel_iommu_detach_device(struct iommu_domain *domain,
+                                     struct device *dev)
+{
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       domain_remove_one_dev_info(dmar_domain, pdev);
+}
+
+static int intel_iommu_map(struct iommu_domain *domain,
+                          unsigned long iova, phys_addr_t hpa,
+                          int gfp_order, int iommu_prot)
+{
+       struct dmar_domain *dmar_domain = domain->priv;
+       u64 max_addr;
+       int prot = 0;
+       size_t size;
+       int ret;
+
+       if (iommu_prot & IOMMU_READ)
+               prot |= DMA_PTE_READ;
+       if (iommu_prot & IOMMU_WRITE)
+               prot |= DMA_PTE_WRITE;
+       if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
+               prot |= DMA_PTE_SNP;
+
+       size     = PAGE_SIZE << gfp_order;
+       max_addr = iova + size;
+       if (dmar_domain->max_addr < max_addr) {
+               u64 end;
+
+               /* check if minimum agaw is sufficient for mapped address */
+               end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
+               if (end < max_addr) {
+                       printk(KERN_ERR "%s: iommu width (%d) is not "
+                              "sufficient for the mapped address (%llx)\n",
+                              __func__, dmar_domain->gaw, max_addr);
+                       return -EFAULT;
+               }
+               dmar_domain->max_addr = max_addr;
+       }
+       /* Round up size to next multiple of PAGE_SIZE, if it and
+          the low bits of hpa would take us onto the next page */
+       size = aligned_nrpages(hpa, size);
+       ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
+                                hpa >> VTD_PAGE_SHIFT, size, prot);
+       return ret;
+}
+
+static int intel_iommu_unmap(struct iommu_domain *domain,
+                            unsigned long iova, int gfp_order)
+{
+       struct dmar_domain *dmar_domain = domain->priv;
+       size_t size = PAGE_SIZE << gfp_order;
+
+       dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+                           (iova + size - 1) >> VTD_PAGE_SHIFT);
+
+       if (dmar_domain->max_addr == iova + size)
+               dmar_domain->max_addr = iova;
+
+       return gfp_order;
+}
+
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+                                           unsigned long iova)
+{
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct dma_pte *pte;
+       u64 phys = 0;
+
+       pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
+       if (pte)
+               phys = dma_pte_addr(pte);
+
+       return phys;
+}
+
+static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
+                                     unsigned long cap)
+{
+       struct dmar_domain *dmar_domain = domain->priv;
+
+       if (cap == IOMMU_CAP_CACHE_COHERENCY)
+               return dmar_domain->iommu_snooping;
+       if (cap == IOMMU_CAP_INTR_REMAP)
+               return intr_remapping_enabled;
+
+       return 0;
+}
+
+static struct iommu_ops intel_iommu_ops = {
+       .domain_init    = intel_iommu_domain_init,
+       .domain_destroy = intel_iommu_domain_destroy,
+       .attach_dev     = intel_iommu_attach_device,
+       .detach_dev     = intel_iommu_detach_device,
+       .map            = intel_iommu_map,
+       .unmap          = intel_iommu_unmap,
+       .iova_to_phys   = intel_iommu_iova_to_phys,
+       .domain_has_cap = intel_iommu_domain_has_cap,
+};
+
+static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+{
+       /*
+        * Mobile 4 Series Chipset neglects to set RWBF capability,
+        * but needs it:
+        */
+       printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+       rwbf_quirk = 1;
+
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
+       if (dev->revision == 0x07) {
+               printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+               dmar_map_gfx = 0;
+       }
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+
+#define GGC 0x52
+#define GGC_MEMORY_SIZE_MASK   (0xf << 8)
+#define GGC_MEMORY_SIZE_NONE   (0x0 << 8)
+#define GGC_MEMORY_SIZE_1M     (0x1 << 8)
+#define GGC_MEMORY_SIZE_2M     (0x3 << 8)
+#define GGC_MEMORY_VT_ENABLED  (0x8 << 8)
+#define GGC_MEMORY_SIZE_2M_VT  (0x9 << 8)
+#define GGC_MEMORY_SIZE_3M_VT  (0xa << 8)
+#define GGC_MEMORY_SIZE_4M_VT  (0xb << 8)
+
+static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+{
+       unsigned short ggc;
+
+       if (pci_read_config_word(dev, GGC, &ggc))
+               return;
+
+       if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
+               printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+               dmar_map_gfx = 0;
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+
+/* On Tylersburg chipsets, some BIOSes have been known to enable the
+   ISOCH DMAR unit for the Azalia sound device, but not give it any
+   TLB entries, which causes it to deadlock. Check for that.  We do
+   this in a function called from init_dmars(), instead of in a PCI
+   quirk, because we don't want to print the obnoxious "BIOS broken"
+   message if VT-d is actually disabled.
+*/
+static void __init check_tylersburg_isoch(void)
+{
+       struct pci_dev *pdev;
+       uint32_t vtisochctrl;
+
+       /* If there's no Azalia in the system anyway, forget it. */
+       pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
+       if (!pdev)
+               return;
+       pci_dev_put(pdev);
+
+       /* System Management Registers. Might be hidden, in which case
+          we can't do the sanity check. But that's OK, because the
+          known-broken BIOSes _don't_ actually hide it, so far. */
+       pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
+       if (!pdev)
+               return;
+
+       if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
+               pci_dev_put(pdev);
+               return;
+       }
+
+       pci_dev_put(pdev);
+
+       /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
+       if (vtisochctrl & 1)
+               return;
+
+       /* Drop all bits other than the number of TLB entries */
+       vtisochctrl &= 0x1c;
+
+       /* If we have the recommended number of TLB entries (16), fine. */
+       if (vtisochctrl == 0x10)
+               return;
+
+       /* Zero TLB entries? You get to ride the short bus to school. */
+       if (!vtisochctrl) {
+               WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
+                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                    dmi_get_system_info(DMI_BIOS_VENDOR),
+                    dmi_get_system_info(DMI_BIOS_VERSION),
+                    dmi_get_system_info(DMI_PRODUCT_VERSION));
+               iommu_identity_mapping |= IDENTMAP_AZALIA;
+               return;
+       }
+
+       printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
+              vtisochctrl);
+}
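
The file above ends by wiring intel_iommu_ops into the generic IOMMU layer via register_iommu(). The fragment below is not part of the patch; it is a minimal, hypothetical sketch of how a consumer such as KVM device assignment reaches those callbacks through include/linux/iommu.h, assuming the gfp_order-based iommu_map()/iommu_unmap() prototypes of this kernel generation. Function and variable names are illustrative only.

#include <linux/iommu.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/* Hypothetical caller: map one page for a device through the generic API. */
static int example_map_one_page(struct device *dev, unsigned long iova,
				phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc();		/* -> intel_iommu_domain_init() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* -> intel_iommu_attach_device() */
	if (ret)
		goto out_free;

	/* gfp_order 0 == a single page; -> intel_iommu_map() */
	ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* -> intel_iommu_iova_to_phys(); assumes paddr is page aligned */
	WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

	iommu_unmap(domain, iova, 0);		/* -> intel_iommu_unmap() */
out_detach:
	iommu_detach_device(domain, dev);	/* -> intel_iommu_detach_device() */
out_free:
	iommu_domain_free(domain);		/* -> intel_iommu_domain_destroy() */
	return ret;
}

The sketch tears the domain down again on every path; a real user would of course keep the domain and its mappings alive for the lifetime of the assignment.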
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
new file mode 100644 (file)
index 0000000..1a89d4a
--- /dev/null
@@ -0,0 +1,797 @@
+#include <linux/interrupt.h>
+#include <linux/dmar.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/hpet.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <asm/io_apic.h>
+#include <asm/smp.h>
+#include <asm/cpu.h>
+#include <linux/intel-iommu.h>
+#include "intr_remapping.h"
+#include <acpi/acpi.h>
+#include <asm/pci-direct.h>
+
+static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
+static struct hpet_scope ir_hpet[MAX_HPET_TBS];
+static int ir_ioapic_num, ir_hpet_num;
+int intr_remapping_enabled;
+
+static int disable_intremap;
+static int disable_sourceid_checking;
+
+static __init int setup_nointremap(char *str)
+{
+       disable_intremap = 1;
+       return 0;
+}
+early_param("nointremap", setup_nointremap);
+
+static __init int setup_intremap(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (!strncmp(str, "on", 2))
+               disable_intremap = 0;
+       else if (!strncmp(str, "off", 3))
+               disable_intremap = 1;
+       else if (!strncmp(str, "nosid", 5))
+               disable_sourceid_checking = 1;
+
+       return 0;
+}
+early_param("intremap", setup_intremap);
+
+static DEFINE_SPINLOCK(irq_2_ir_lock);
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+       struct irq_cfg *cfg = irq_get_chip_data(irq);
+       return cfg ? &cfg->irq_2_iommu : NULL;
+}
+
+int get_irte(int irq, struct irte *entry)
+{
+       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+       unsigned long flags;
+       int index;
+
+       if (!entry || !irq_iommu)
+               return -1;
+
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
+
+       index = irq_iommu->irte_index + irq_iommu->sub_handle;
+       *entry = *(irq_iommu->iommu->ir_table->base + index);
+
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+       return 0;
+}
+
+int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+{
+       struct ir_table *table = iommu->ir_table;
+       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+       u16 index, start_index;
+       unsigned int mask = 0;
+       unsigned long flags;
+       int i;
+
+       if (!count || !irq_iommu)
+               return -1;
+
+       /*
+        * start the IRTE search from index 0.
+        */
+       index = start_index = 0;
+
+       if (count > 1) {
+               count = __roundup_pow_of_two(count);
+               mask = ilog2(count);
+       }
+
+       if (mask > ecap_max_handle_mask(iommu->ecap)) {
+               printk(KERN_ERR
+                      "Requested mask %x exceeds the max invalidation handle"
+                      " mask value %Lx\n", mask,
+                      ecap_max_handle_mask(iommu->ecap));
+               return -1;
+       }
+
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
+       do {
+               for (i = index; i < index + count; i++)
+                       if  (table->base[i].present)
+                               break;
+               /* empty index found */
+               if (i == index + count)
+                       break;
+
+               index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
+
+               if (index == start_index) {
+                       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+                       printk(KERN_ERR "can't allocate an IRTE\n");
+                       return -1;
+               }
+       } while (1);
+
+       for (i = index; i < index + count; i++)
+               table->base[i].present = 1;
+
+       irq_iommu->iommu = iommu;
+       irq_iommu->irte_index =  index;
+       irq_iommu->sub_handle = 0;
+       irq_iommu->irte_mask = mask;
+
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+
+       return index;
+}
+
+static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+{
+       struct qi_desc desc;
+
+       desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
+                  | QI_IEC_SELECTIVE;
+       desc.high = 0;
+
+       return qi_submit_sync(&desc, iommu);
+}
+
+int map_irq_to_irte_handle(int irq, u16 *sub_handle)
+{
+       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+       unsigned long flags;
+       int index;
+
+       if (!irq_iommu)
+               return -1;
+
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
+       *sub_handle = irq_iommu->sub_handle;
+       index = irq_iommu->irte_index;
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+       return index;
+}
+
+int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
+{
+       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+       unsigned long flags;
+
+       if (!irq_iommu)
+               return -1;
+
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
+
+       irq_iommu->iommu = iommu;
+       irq_iommu->irte_index = index;
+       irq_iommu->sub_handle = subhandle;
+       irq_iommu->irte_mask = 0;
+
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+
+       return 0;
+}
+
+int modify_irte(int irq, struct irte *irte_modified)
+{
+       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+       struct intel_iommu *iommu;
+       unsigned long flags;
+       struct irte *irte;
+       int rc, index;
+
+       if (!irq_iommu)
+               return -1;
+
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
+
+       iommu = irq_iommu->iommu;
+
+       index = irq_iommu->irte_index + irq_iommu->sub_handle;
+       irte = &iommu->ir_table->base[index];
+
+       set_64bit(&irte->low, irte_modified->low);
+       set_64bit(&irte->high, irte_modified->high);
+       __iommu_flush_cache(iommu, irte, sizeof(*irte));
+
+       rc = qi_flush_iec(iommu, index, 0);
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+
+       return rc;
+}
+
+struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
+{
+       int i;
+
+       for (i = 0; i < MAX_HPET_TBS; i++)
+               if (ir_hpet[i].id == hpet_id)
+                       return ir_hpet[i].iommu;
+       return NULL;
+}
+
+struct intel_iommu *map_ioapic_to_ir(int apic)
+{
+       int i;
+
+       for (i = 0; i < MAX_IO_APICS; i++)
+               if (ir_ioapic[i].id == apic)
+                       return ir_ioapic[i].iommu;
+       return NULL;
+}
+
+struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
+{
+       struct dmar_drhd_unit *drhd;
+
+       drhd = dmar_find_matched_drhd_unit(dev);
+       if (!drhd)
+               return NULL;
+
+       return drhd->iommu;
+}
+
+static int clear_entries(struct irq_2_iommu *irq_iommu)
+{
+       struct irte *start, *entry, *end;
+       struct intel_iommu *iommu;
+       int index;
+
+       if (irq_iommu->sub_handle)
+               return 0;
+
+       iommu = irq_iommu->iommu;
+       index = irq_iommu->irte_index + irq_iommu->sub_handle;
+
+       start = iommu->ir_table->base + index;
+       end = start + (1 << irq_iommu->irte_mask);
+
+       for (entry = start; entry < end; entry++) {
+               set_64bit(&entry->low, 0);
+               set_64bit(&entry->high, 0);
+       }
+
+       return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+}
+
+int free_irte(int irq)
+{
+       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+       unsigned long flags;
+       int rc;
+
+       if (!irq_iommu)
+               return -1;
+
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
+
+       rc = clear_entries(irq_iommu);
+
+       irq_iommu->iommu = NULL;
+       irq_iommu->irte_index = 0;
+       irq_iommu->sub_handle = 0;
+       irq_iommu->irte_mask = 0;
+
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+
+       return rc;
+}
+
+/*
+ * source validation type
+ */
+#define SVT_NO_VERIFY          0x0  /* no verification is required */
+#define SVT_VERIFY_SID_SQ      0x1  /* verify using SID and SQ fields */
+#define SVT_VERIFY_BUS         0x2  /* verify bus of request-id */
+
+/*
+ * source-id qualifier
+ */
+#define SQ_ALL_16      0x0  /* verify all 16 bits of request-id */
+#define SQ_13_IGNORE_1 0x1  /* verify most significant 13 bits, ignore
+                             * the third least significant bit
+                             */
+#define SQ_13_IGNORE_2 0x2  /* verify most significant 13 bits, ignore
+                             * the second and third least significant bits
+                             */
+#define SQ_13_IGNORE_3 0x3  /* verify most significant 13 bits, ignore
+                             * the least three significant bits
+                             */
+
+/*
+ * set SVT, SQ and SID fields of irte to verify
+ * source ids of interrupt requests
+ */
+static void set_irte_sid(struct irte *irte, unsigned int svt,
+                        unsigned int sq, unsigned int sid)
+{
+       if (disable_sourceid_checking)
+               svt = SVT_NO_VERIFY;
+       irte->svt = svt;
+       irte->sq = sq;
+       irte->sid = sid;
+}
+
+int set_ioapic_sid(struct irte *irte, int apic)
+{
+       int i;
+       u16 sid = 0;
+
+       if (!irte)
+               return -1;
+
+       for (i = 0; i < MAX_IO_APICS; i++) {
+               if (ir_ioapic[i].id == apic) {
+                       sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
+                       break;
+               }
+       }
+
+       if (sid == 0) {
+               pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
+               return -1;
+       }
+
+       set_irte_sid(irte, 1, 0, sid);
+
+       return 0;
+}
+
+int set_hpet_sid(struct irte *irte, u8 id)
+{
+       int i;
+       u16 sid = 0;
+
+       if (!irte)
+               return -1;
+
+       for (i = 0; i < MAX_HPET_TBS; i++) {
+               if (ir_hpet[i].id == id) {
+                       sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
+                       break;
+               }
+       }
+
+       if (sid == 0) {
+               pr_warning("Failed to set source-id of HPET block (%d)\n", id);
+               return -1;
+       }
+
+       /*
+        * Should really use SQ_ALL_16. Some platforms are broken.
+        * While we figure out the right quirks for these broken platforms, use
+        * SQ_13_IGNORE_3 for now.
+        */
+       set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);
+
+       return 0;
+}
+
+int set_msi_sid(struct irte *irte, struct pci_dev *dev)
+{
+       struct pci_dev *bridge;
+
+       if (!irte || !dev)
+               return -1;
+
+       /* PCIe device or Root Complex integrated PCI device */
+       if (pci_is_pcie(dev) || !dev->bus->parent) {
+               set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
+                            (dev->bus->number << 8) | dev->devfn);
+               return 0;
+       }
+
+       bridge = pci_find_upstream_pcie_bridge(dev);
+       if (bridge) {
+               if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
+                       set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
+                               (bridge->bus->number << 8) | dev->bus->number);
+               else /* this is a legacy PCI bridge */
+                       set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
+                               (bridge->bus->number << 8) | bridge->devfn);
+       }
+
+       return 0;
+}
+
+static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
+{
+       u64 addr;
+       u32 sts;
+       unsigned long flags;
+
+       addr = virt_to_phys((void *)iommu->ir_table->base);
+
+       spin_lock_irqsave(&iommu->register_lock, flags);
+
+       dmar_writeq(iommu->reg + DMAR_IRTA_REG,
+                   (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
+
+       /* Set interrupt-remapping table pointer */
+       iommu->gcmd |= DMA_GCMD_SIRTP;
+       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+                     readl, (sts & DMA_GSTS_IRTPS), sts);
+       spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+       /*
+        * global invalidation of interrupt entry cache before enabling
+        * interrupt-remapping.
+        */
+       qi_global_iec(iommu);
+
+       spin_lock_irqsave(&iommu->register_lock, flags);
+
+       /* Enable interrupt-remapping */
+       iommu->gcmd |= DMA_GCMD_IRE;
+       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+                     readl, (sts & DMA_GSTS_IRES), sts);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+
+static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
+{
+       struct ir_table *ir_table;
+       struct page *pages;
+
+       ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
+                                            GFP_ATOMIC);
+
+       if (!iommu->ir_table)
+               return -ENOMEM;
+
+       pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
+                                INTR_REMAP_PAGE_ORDER);
+
+       if (!pages) {
+               printk(KERN_ERR "failed to allocate pages of order %d\n",
+                      INTR_REMAP_PAGE_ORDER);
+               kfree(iommu->ir_table);
+               return -ENOMEM;
+       }
+
+       ir_table->base = page_address(pages);
+
+       iommu_set_intr_remapping(iommu, mode);
+       return 0;
+}
+
+/*
+ * Disable Interrupt Remapping.
+ */
+static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
+{
+       unsigned long flags;
+       u32 sts;
+
+       if (!ecap_ir_support(iommu->ecap))
+               return;
+
+       /*
+        * global invalidation of interrupt entry cache before disabling
+        * interrupt-remapping.
+        */
+       qi_global_iec(iommu);
+
+       spin_lock_irqsave(&iommu->register_lock, flags);
+
+       sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+       if (!(sts & DMA_GSTS_IRES))
+               goto end;
+
+       iommu->gcmd &= ~DMA_GCMD_IRE;
+       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+                     readl, !(sts & DMA_GSTS_IRES), sts);
+
+end:
+       spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+int __init intr_remapping_supported(void)
+{
+       struct dmar_drhd_unit *drhd;
+
+       if (disable_intremap)
+               return 0;
+
+       if (!dmar_ir_support())
+               return 0;
+
+       for_each_drhd_unit(drhd) {
+               struct intel_iommu *iommu = drhd->iommu;
+
+               if (!ecap_ir_support(iommu->ecap))
+                       return 0;
+       }
+
+       return 1;
+}
+
+int __init enable_intr_remapping(int eim)
+{
+       struct dmar_drhd_unit *drhd;
+       int setup = 0;
+
+       if (parse_ioapics_under_ir() != 1) {
+               printk(KERN_INFO "Not enabling interrupt remapping\n");
+               return -1;
+       }
+
+       for_each_drhd_unit(drhd) {
+               struct intel_iommu *iommu = drhd->iommu;
+
+               /*
+                * If the queued invalidation is already initialized,
+                * we shouldn't disable it.
+                */
+               if (iommu->qi)
+                       continue;
+
+               /*
+                * Clear previous faults.
+                */
+               dmar_fault(-1, iommu);
+
+               /*
+                * Disable intr remapping and queued invalidation, if already
+                * enabled prior to OS handover.
+                */
+               iommu_disable_intr_remapping(iommu);
+
+               dmar_disable_qi(iommu);
+       }
+
+       /*
+        * check for the Interrupt-remapping support
+        */
+       for_each_drhd_unit(drhd) {
+               struct intel_iommu *iommu = drhd->iommu;
+
+               if (!ecap_ir_support(iommu->ecap))
+                       continue;
+
+               if (eim && !ecap_eim_support(iommu->ecap)) {
+                       printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
+                              " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
+                       return -1;
+               }
+       }
+
+       /*
+        * Enable queued invalidation for all the DRHD's.
+        */
+       for_each_drhd_unit(drhd) {
+               int ret;
+               struct intel_iommu *iommu = drhd->iommu;
+               ret = dmar_enable_qi(iommu);
+
+               if (ret) {
+                       printk(KERN_ERR "DRHD %Lx: failed to enable queued"
+                              " invalidation, ecap %Lx, ret %d\n",
+                              drhd->reg_base_addr, iommu->ecap, ret);
+                       return -1;
+               }
+       }
+
+       /*
+        * Setup Interrupt-remapping for all the DRHD's now.
+        */
+       for_each_drhd_unit(drhd) {
+               struct intel_iommu *iommu = drhd->iommu;
+
+               if (!ecap_ir_support(iommu->ecap))
+                       continue;
+
+               if (setup_intr_remapping(iommu, eim))
+                       goto error;
+
+               setup = 1;
+       }
+
+       if (!setup)
+               goto error;
+
+       intr_remapping_enabled = 1;
+
+       return 0;
+
+error:
+       /*
+        * handle error condition gracefully here!
+        */
+       return -1;
+}
+
+static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
+                                     struct intel_iommu *iommu)
+{
+       struct acpi_dmar_pci_path *path;
+       u8 bus;
+       int count;
+
+       bus = scope->bus;
+       path = (struct acpi_dmar_pci_path *)(scope + 1);
+       count = (scope->length - sizeof(struct acpi_dmar_device_scope))
+               / sizeof(struct acpi_dmar_pci_path);
+
+       while (--count > 0) {
+               /*
+                * Access PCI directly because the PCI
+                * subsystem isn't initialized yet.
+                */
+               bus = read_pci_config_byte(bus, path->dev, path->fn,
+                                          PCI_SECONDARY_BUS);
+               path++;
+       }
+       ir_hpet[ir_hpet_num].bus   = bus;
+       ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
+       ir_hpet[ir_hpet_num].iommu = iommu;
+       ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
+       ir_hpet_num++;
+}
+
+static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
+                                     struct intel_iommu *iommu)
+{
+       struct acpi_dmar_pci_path *path;
+       u8 bus;
+       int count;
+
+       bus = scope->bus;
+       path = (struct acpi_dmar_pci_path *)(scope + 1);
+       count = (scope->length - sizeof(struct acpi_dmar_device_scope))
+               / sizeof(struct acpi_dmar_pci_path);
+
+       while (--count > 0) {
+               /*
+                * Access PCI directly because the PCI
+                * subsystem isn't initialized yet.
+                */
+               bus = read_pci_config_byte(bus, path->dev, path->fn,
+                                          PCI_SECONDARY_BUS);
+               path++;
+       }
+
+       ir_ioapic[ir_ioapic_num].bus   = bus;
+       ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
+       ir_ioapic[ir_ioapic_num].iommu = iommu;
+       ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
+       ir_ioapic_num++;
+}
+
+static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
+                                     struct intel_iommu *iommu)
+{
+       struct acpi_dmar_hardware_unit *drhd;
+       struct acpi_dmar_device_scope *scope;
+       void *start, *end;
+
+       drhd = (struct acpi_dmar_hardware_unit *)header;
+
+       start = (void *)(drhd + 1);
+       end = ((void *)drhd) + header->length;
+
+       while (start < end) {
+               scope = start;
+               if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
+                       if (ir_ioapic_num == MAX_IO_APICS) {
+                               printk(KERN_WARNING "Exceeded Max IO APICS\n");
+                               return -1;
+                       }
+
+                       printk(KERN_INFO "IOAPIC id %d under DRHD base "
+                              " 0x%Lx IOMMU %d\n", scope->enumeration_id,
+                              drhd->address, iommu->seq_id);
+
+                       ir_parse_one_ioapic_scope(scope, iommu);
+               } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
+                       if (ir_hpet_num == MAX_HPET_TBS) {
+                               printk(KERN_WARNING "Exceeded Max HPET blocks\n");
+                               return -1;
+                       }
+
+                       printk(KERN_INFO "HPET id %d under DRHD base"
+                              " 0x%Lx\n", scope->enumeration_id,
+                              drhd->address);
+
+                       ir_parse_one_hpet_scope(scope, iommu);
+               }
+               start += scope->length;
+       }
+
+       return 0;
+}
+
+/*
+ * Finds the association between IOAPICs and their Interrupt-remapping
+ * hardware unit.
+ */
+int __init parse_ioapics_under_ir(void)
+{
+       struct dmar_drhd_unit *drhd;
+       int ir_supported = 0;
+
+       for_each_drhd_unit(drhd) {
+               struct intel_iommu *iommu = drhd->iommu;
+
+               if (ecap_ir_support(iommu->ecap)) {
+                       if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
+                               return -1;
+
+                       ir_supported = 1;
+               }
+       }
+
+       if (ir_supported && ir_ioapic_num != nr_ioapics) {
+               printk(KERN_WARNING
+                      "Not all IO-APIC's listed under remapping hardware\n");
+               return -1;
+       }
+
+       return ir_supported;
+}
+
+void disable_intr_remapping(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu = NULL;
+
+       /*
+        * Disable Interrupt-remapping for all the DRHD's now.
+        */
+       for_each_iommu(iommu, drhd) {
+               if (!ecap_ir_support(iommu->ecap))
+                       continue;
+
+               iommu_disable_intr_remapping(iommu);
+       }
+}
+
+int reenable_intr_remapping(int eim)
+{
+       struct dmar_drhd_unit *drhd;
+       int setup = 0;
+       struct intel_iommu *iommu = NULL;
+
+       for_each_iommu(iommu, drhd)
+               if (iommu->qi)
+                       dmar_reenable_qi(iommu);
+
+       /*
+        * Setup Interrupt-remapping for all the DRHD's now.
+        */
+       for_each_iommu(iommu, drhd) {
+               if (!ecap_ir_support(iommu->ecap))
+                       continue;
+
+               /* Set up interrupt remapping for iommu.*/
+               iommu_set_intr_remapping(iommu, eim);
+               setup = 1;
+       }
+
+       if (!setup)
+               goto error;
+
+       return 0;
+
+error:
+       /*
+        * handle error condition gracefully here!
+        */
+       return -1;
+}
+
diff --git a/drivers/iommu/intr_remapping.h b/drivers/iommu/intr_remapping.h
new file mode 100644 (file)
index 0000000..5662fec
--- /dev/null
@@ -0,0 +1,17 @@
+#include <linux/intel-iommu.h>
+
+struct ioapic_scope {
+       struct intel_iommu *iommu;
+       unsigned int id;
+       unsigned int bus;       /* PCI bus number */
+       unsigned int devfn;     /* PCI devfn number */
+};
+
+struct hpet_scope {
+       struct intel_iommu *iommu;
+       u8 id;
+       unsigned int bus;
+       unsigned int devfn;
+};
+
+#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
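
intr_remapping.h only carries the scope bookkeeping used by the parsing code above. As a rough orientation (not part of the patch), the sketch below shows the IRTE lifecycle the x86 MSI path drives through the helpers from intr_remapping.c: alloc_irte(), set_msi_sid(), modify_irte() and, on teardown, free_irte(). The struct irte fields touched here (present, vector, dest_id) are only an illustrative subset; the real callers compose the full entry.

#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical sketch: program one remapped MSI vector for a device. */
static int example_program_msi_irte(struct pci_dev *dev, int irq,
				    u8 vector, u32 dest_apicid)
{
	struct intel_iommu *iommu = map_dev_to_ir(dev);
	struct irte irte;
	int index;

	if (!iommu)
		return -ENODEV;

	index = alloc_irte(iommu, irq, 1);	/* reserve one IRTE, record it in irq_2_iommu */
	if (index < 0)
		return -ENOSPC;

	memset(&irte, 0, sizeof(irte));
	irte.present = 1;
	irte.vector  = vector;
	irte.dest_id = dest_apicid;	/* real code adjusts this for xAPIC vs x2APIC mode */

	set_msi_sid(&irte, dev);	/* fill SVT/SQ/SID so hardware can validate the requester */
	modify_irte(irq, &irte);	/* write the entry and flush the interrupt entry cache */

	return index;
}

/* Teardown when the irq is destroyed is simply: free_irte(irq); */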
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
new file mode 100644 (file)
index 0000000..c5c274a
--- /dev/null
@@ -0,0 +1,435 @@
+/*
+ * Copyright © 2006-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ */
+
+#include <linux/iova.h>
+
+void
+init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
+{
+       spin_lock_init(&iovad->iova_rbtree_lock);
+       iovad->rbroot = RB_ROOT;
+       iovad->cached32_node = NULL;
+       iovad->dma_32bit_pfn = pfn_32bit;
+}
+
+static struct rb_node *
+__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
+{
+       if ((*limit_pfn != iovad->dma_32bit_pfn) ||
+               (iovad->cached32_node == NULL))
+               return rb_last(&iovad->rbroot);
+       else {
+               struct rb_node *prev_node = rb_prev(iovad->cached32_node);
+               struct iova *curr_iova =
+                       container_of(iovad->cached32_node, struct iova, node);
+               *limit_pfn = curr_iova->pfn_lo - 1;
+               return prev_node;
+       }
+}
+
+static void
+__cached_rbnode_insert_update(struct iova_domain *iovad,
+       unsigned long limit_pfn, struct iova *new)
+{
+       if (limit_pfn != iovad->dma_32bit_pfn)
+               return;
+       iovad->cached32_node = &new->node;
+}
+
+static void
+__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
+{
+       struct iova *cached_iova;
+       struct rb_node *curr;
+
+       if (!iovad->cached32_node)
+               return;
+       curr = iovad->cached32_node;
+       cached_iova = container_of(curr, struct iova, node);
+
+       if (free->pfn_lo >= cached_iova->pfn_lo) {
+               struct rb_node *node = rb_next(&free->node);
+               struct iova *iova = container_of(node, struct iova, node);
+
+               /* only cache if it's below 32bit pfn */
+               if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
+                       iovad->cached32_node = node;
+               else
+                       iovad->cached32_node = NULL;
+       }
+}
+
+/* Computes the padding size required to make the
+ * start address naturally aligned on its size
+ */
+static int
+iova_get_pad_size(int size, unsigned int limit_pfn)
+{
+       unsigned int pad_size = 0;
+       unsigned int order = ilog2(size);
+
+       if (order)
+               pad_size = (limit_pfn + 1) % (1 << order);
+
+       return pad_size;
+}
+
+static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
+               unsigned long size, unsigned long limit_pfn,
+                       struct iova *new, bool size_aligned)
+{
+       struct rb_node *prev, *curr = NULL;
+       unsigned long flags;
+       unsigned long saved_pfn;
+       unsigned int pad_size = 0;
+
+       /* Walk the tree backwards */
+       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+       saved_pfn = limit_pfn;
+       curr = __get_cached_rbnode(iovad, &limit_pfn);
+       prev = curr;
+       while (curr) {
+               struct iova *curr_iova = container_of(curr, struct iova, node);
+
+               if (limit_pfn < curr_iova->pfn_lo)
+                       goto move_left;
+               else if (limit_pfn < curr_iova->pfn_hi)
+                       goto adjust_limit_pfn;
+               else {
+                       if (size_aligned)
+                               pad_size = iova_get_pad_size(size, limit_pfn);
+                       if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
+                               break;  /* found a free slot */
+               }
+adjust_limit_pfn:
+               limit_pfn = curr_iova->pfn_lo - 1;
+move_left:
+               prev = curr;
+               curr = rb_prev(curr);
+       }
+
+       if (!curr) {
+               if (size_aligned)
+                       pad_size = iova_get_pad_size(size, limit_pfn);
+               if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+                       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+                       return -ENOMEM;
+               }
+       }
+
+       /* pfn_lo will point to size aligned address if size_aligned is set */
+       new->pfn_lo = limit_pfn - (size + pad_size) + 1;
+       new->pfn_hi = new->pfn_lo + size - 1;
+
+       /* Insert the new_iova into domain rbtree by holding writer lock */
+       /* Add new node and rebalance tree. */
+       {
+               struct rb_node **entry, *parent = NULL;
+
+               /* If we have 'prev', it's a valid place to start the
+                  insertion. Otherwise, start from the root. */
+               if (prev)
+                       entry = &prev;
+               else
+                       entry = &iovad->rbroot.rb_node;
+
+               /* Figure out where to put new node */
+               while (*entry) {
+                       struct iova *this = container_of(*entry,
+                                                       struct iova, node);
+                       parent = *entry;
+
+                       if (new->pfn_lo < this->pfn_lo)
+                               entry = &((*entry)->rb_left);
+                       else if (new->pfn_lo > this->pfn_lo)
+                               entry = &((*entry)->rb_right);
+                       else
+                               BUG(); /* this should not happen */
+               }
+
+               /* Add new node and rebalance tree. */
+               rb_link_node(&new->node, parent, entry);
+               rb_insert_color(&new->node, &iovad->rbroot);
+       }
+       __cached_rbnode_insert_update(iovad, saved_pfn, new);
+
+       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+
+       return 0;
+}
+
+static void
+iova_insert_rbtree(struct rb_root *root, struct iova *iova)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+       /* Figure out where to put new node */
+       while (*new) {
+               struct iova *this = container_of(*new, struct iova, node);
+               parent = *new;
+
+               if (iova->pfn_lo < this->pfn_lo)
+                       new = &((*new)->rb_left);
+               else if (iova->pfn_lo > this->pfn_lo)
+                       new = &((*new)->rb_right);
+               else
+                       BUG(); /* this should not happen */
+       }
+       /* Add new node and rebalance tree. */
+       rb_link_node(&iova->node, parent, new);
+       rb_insert_color(&iova->node, root);
+}
+
+/**
+ * alloc_iova - allocates an iova
+ * @iovad - iova domain in question
+ * @size - size of page frames to allocate
+ * @limit_pfn - max limit address
+ * @size_aligned - set if size_aligned address range is required
+ * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
+ * looking from limit_pfn instead of from IOVA_START_PFN. If the size_aligned
+ * flag is set then the allocated address iova->pfn_lo will be naturally
+ * aligned on roundup_power_of_two(size).
+ */
+struct iova *
+alloc_iova(struct iova_domain *iovad, unsigned long size,
+       unsigned long limit_pfn,
+       bool size_aligned)
+{
+       struct iova *new_iova;
+       int ret;
+
+       new_iova = alloc_iova_mem();
+       if (!new_iova)
+               return NULL;
+
+       /* If size_aligned is set then round the size up
+        * to the next power of two.
+        */
+       if (size_aligned)
+               size = __roundup_pow_of_two(size);
+
+       ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
+                       new_iova, size_aligned);
+
+       if (ret) {
+               free_iova_mem(new_iova);
+               return NULL;
+       }
+
+       return new_iova;
+}
+
+/**
+ * find_iova - finds an iova for a given pfn
+ * @iovad - iova domain in question.
+ * @pfn - page frame number
+ * This function finds and returns an iova belonging to the
+ * given domain which matches the given pfn.
+ */
+struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+       unsigned long flags;
+       struct rb_node *node;
+
+       /* Take the lock so that no other thread is manipulating the rbtree */
+       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+       node = iovad->rbroot.rb_node;
+       while (node) {
+               struct iova *iova = container_of(node, struct iova, node);
+
+               /* If pfn falls within iova's range, return iova */
+               if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
+                       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+                       /* We are not holding the lock while this iova
+                        * is referenced by the caller as the same thread
+                        * which called this function also calls __free_iova()
+                        * and it is by design that only one thread can possibly
+                        * reference a particular iova and hence no conflict.
+                        */
+                       return iova;
+               }
+
+               if (pfn < iova->pfn_lo)
+                       node = node->rb_left;
+               else if (pfn > iova->pfn_lo)
+                       node = node->rb_right;
+       }
+
+       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+       return NULL;
+}
+
+/**
+ * __free_iova - frees the given iova
+ * @iovad: iova domain in question.
+ * @iova: iova in question.
+ * Frees the given iova belonging to the given domain
+ */
+void
+__free_iova(struct iova_domain *iovad, struct iova *iova)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+       __cached_rbnode_delete_update(iovad, iova);
+       rb_erase(&iova->node, &iovad->rbroot);
+       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+       free_iova_mem(iova);
+}
+
+/**
+ * free_iova - finds and frees the iova for a given pfn
+ * @iovad: - iova domain in question.
+ * @pfn: - pfn that is allocated previously
+ * This function finds an iova for a given pfn and then
+ * frees the iova from that domain.
+ */
+void
+free_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+       struct iova *iova = find_iova(iovad, pfn);
+       if (iova)
+               __free_iova(iovad, iova);
+
+}
+
+/**
+ * put_iova_domain - destroys the iova domain
+ * @iovad: - iova domain in question.
+ * All the iovas in that domain are destroyed.
+ */
+void put_iova_domain(struct iova_domain *iovad)
+{
+       struct rb_node *node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+       node = rb_first(&iovad->rbroot);
+       while (node) {
+               struct iova *iova = container_of(node, struct iova, node);
+               rb_erase(node, &iovad->rbroot);
+               free_iova_mem(iova);
+               node = rb_first(&iovad->rbroot);
+       }
+       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+}
+
+static int
+__is_range_overlap(struct rb_node *node,
+       unsigned long pfn_lo, unsigned long pfn_hi)
+{
+       struct iova *iova = container_of(node, struct iova, node);
+
+       if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
+               return 1;
+       return 0;
+}
+
+static struct iova *
+__insert_new_range(struct iova_domain *iovad,
+       unsigned long pfn_lo, unsigned long pfn_hi)
+{
+       struct iova *iova;
+
+       iova = alloc_iova_mem();
+       if (!iova)
+               return iova;
+
+       iova->pfn_hi = pfn_hi;
+       iova->pfn_lo = pfn_lo;
+       iova_insert_rbtree(&iovad->rbroot, iova);
+       return iova;
+}
+
+static void
+__adjust_overlap_range(struct iova *iova,
+       unsigned long *pfn_lo, unsigned long *pfn_hi)
+{
+       if (*pfn_lo < iova->pfn_lo)
+               iova->pfn_lo = *pfn_lo;
+       if (*pfn_hi > iova->pfn_hi)
+               *pfn_lo = iova->pfn_hi + 1;
+}
+
+/**
+ * reserve_iova - reserves an iova in the given range
+ * @iovad: - iova domain pointer
+ * @pfn_lo: - lower page frame address
+ * @pfn_hi: - higher pfn address
+ * This function reserves the address range from pfn_lo to pfn_hi so
+ * that this address is not dished out as part of alloc_iova.
+ */
+struct iova *
+reserve_iova(struct iova_domain *iovad,
+       unsigned long pfn_lo, unsigned long pfn_hi)
+{
+       struct rb_node *node;
+       unsigned long flags;
+       struct iova *iova;
+       unsigned int overlap = 0;
+
+       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+       for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
+               if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
+                       iova = container_of(node, struct iova, node);
+                       __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
+                       if ((pfn_lo >= iova->pfn_lo) &&
+                               (pfn_hi <= iova->pfn_hi))
+                               goto finish;
+                       overlap = 1;
+
+               } else if (overlap)
+                               break;
+       }
+
+       /* We are here either because this is the first reserved node
+        * or because we need to insert the remaining non-overlapping addr range
+        */
+       iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
+finish:
+
+       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+       return iova;
+}
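
(Editorial usage sketch.) The typical caller reserves hardware MMIO windows once at init time so they are never handed out by alloc_iova(); the names below (reserved_iova_list, IOVA_PFN(), the IOAPIC range constants) are taken from intel-iommu.c further down in this patch:

        if (!reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                          IOVA_PFN(IOAPIC_RANGE_END)))
                printk(KERN_ERR "Reserve IOAPIC range failed\n");
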
+
+/**
+ * copy_reserved_iova - copies the reserved iovas between domains
+ * @from: - source domain from which to copy
+ * @to: - destination domain to copy to
+ * This function copies reserved iovas from one domain to
+ * another.
+ */
+void
+copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
+{
+       unsigned long flags;
+       struct rb_node *node;
+
+       spin_lock_irqsave(&from->iova_rbtree_lock, flags);
+       for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
+               struct iova *iova = container_of(node, struct iova, node);
+               struct iova *new_iova;
+               new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
+               if (!new_iova)
+                       printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
+                               iova->pfn_lo, iova->pfn_hi);
+       }
+       spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
+}
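
(Editorial note.) copy_reserved_iova() is how a freshly created DMA domain inherits the global reservations, so per-domain allocations can never collide with them. A sketch of the call-site pattern used by intel-iommu.c (function name as in that file):

        static void domain_reserve_special_ranges(struct dmar_domain *domain)
        {
                copy_reserved_iova(&reserved_iova_list, &domain->iovad);
        }
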
index 094308e41be558b525a2198b3c7ee98cddc82359..825c02b40daa998b54768390275452425d0f5c52 100644 (file)
@@ -29,11 +29,6 @@ obj-$(CONFIG_PCI_MSI) += msi.o
 # Build the Hypertransport interrupt support
 obj-$(CONFIG_HT_IRQ) += htirq.o
 
-# Build Intel IOMMU support
-obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
-
-obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
-
 obj-$(CONFIG_PCI_IOV) += iov.o
 
 #
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
deleted file mode 100644 (file)
index 3dc9bef..0000000
+++ /dev/null
@@ -1,1461 +0,0 @@
-/*
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Author: Ashok Raj <ashok.raj@intel.com>
- * Author: Shaohua Li <shaohua.li@intel.com>
- * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- *
- * This file implements early detection/parsing of Remapping Devices
- * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
- * tables.
- *
- * These routines are used by both DMA-remapping and Interrupt-remapping
- */
-
-#include <linux/pci.h>
-#include <linux/dmar.h>
-#include <linux/iova.h>
-#include <linux/intel-iommu.h>
-#include <linux/timer.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/tboot.h>
-#include <linux/dmi.h>
-#include <linux/slab.h>
-#include <asm/iommu_table.h>
-
-#define PREFIX "DMAR: "
-
-/* No locks are needed as DMA remapping hardware unit
- * list is constructed at boot time and hotplug of
- * these units are not supported by the architecture.
- */
-LIST_HEAD(dmar_drhd_units);
-
-static struct acpi_table_header * __initdata dmar_tbl;
-static acpi_size dmar_tbl_size;
-
-static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
-{
-       /*
-        * add INCLUDE_ALL at the tail, so scanning the list will find it at
-        * the very end.
-        */
-       if (drhd->include_all)
-               list_add_tail(&drhd->list, &dmar_drhd_units);
-       else
-               list_add(&drhd->list, &dmar_drhd_units);
-}
-
-static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
-                                          struct pci_dev **dev, u16 segment)
-{
-       struct pci_bus *bus;
-       struct pci_dev *pdev = NULL;
-       struct acpi_dmar_pci_path *path;
-       int count;
-
-       bus = pci_find_bus(segment, scope->bus);
-       path = (struct acpi_dmar_pci_path *)(scope + 1);
-       count = (scope->length - sizeof(struct acpi_dmar_device_scope))
-               / sizeof(struct acpi_dmar_pci_path);
-
-       while (count) {
-               if (pdev)
-                       pci_dev_put(pdev);
-               /*
-                * Some BIOSes list non-existent devices in the DMAR table; just
-                * ignore them
-                */
-               if (!bus) {
-                       printk(KERN_WARNING
-                       PREFIX "Device scope bus [%d] not found\n",
-                       scope->bus);
-                       break;
-               }
-               pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
-               if (!pdev) {
-                       printk(KERN_WARNING PREFIX
-                       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
-                               segment, bus->number, path->dev, path->fn);
-                       break;
-               }
-               path++;
-               count--;
-               bus = pdev->subordinate;
-       }
-       if (!pdev) {
-               printk(KERN_WARNING PREFIX
-               "Device scope device [%04x:%02x:%02x.%02x] not found\n",
-               segment, scope->bus, path->dev, path->fn);
-               *dev = NULL;
-               return 0;
-       }
-       if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
-                       pdev->subordinate) || (scope->entry_type == \
-                       ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
-               pci_dev_put(pdev);
-               printk(KERN_WARNING PREFIX
-                       "Device scope type does not match for %s\n",
-                        pci_name(pdev));
-               return -EINVAL;
-       }
-       *dev = pdev;
-       return 0;
-}
-
-static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
-                                      struct pci_dev ***devices, u16 segment)
-{
-       struct acpi_dmar_device_scope *scope;
-       void * tmp = start;
-       int index;
-       int ret;
-
-       *cnt = 0;
-       while (start < end) {
-               scope = start;
-               if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
-                   scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
-                       (*cnt)++;
-               else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
-                       printk(KERN_WARNING PREFIX
-                              "Unsupported device scope\n");
-               }
-               start += scope->length;
-       }
-       if (*cnt == 0)
-               return 0;
-
-       *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
-       if (!*devices)
-               return -ENOMEM;
-
-       start = tmp;
-       index = 0;
-       while (start < end) {
-               scope = start;
-               if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
-                   scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
-                       ret = dmar_parse_one_dev_scope(scope,
-                               &(*devices)[index], segment);
-                       if (ret) {
-                               kfree(*devices);
-                               return ret;
-                       }
-                       index++;
-               }
-               start += scope->length;
-       }
-
-       return 0;
-}
-
-/**
- * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
- * structure which uniquely represents one DMA remapping hardware unit
- * present in the platform
- */
-static int __init
-dmar_parse_one_drhd(struct acpi_dmar_header *header)
-{
-       struct acpi_dmar_hardware_unit *drhd;
-       struct dmar_drhd_unit *dmaru;
-       int ret = 0;
-
-       drhd = (struct acpi_dmar_hardware_unit *)header;
-       dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
-       if (!dmaru)
-               return -ENOMEM;
-
-       dmaru->hdr = header;
-       dmaru->reg_base_addr = drhd->address;
-       dmaru->segment = drhd->segment;
-       dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
-
-       ret = alloc_iommu(dmaru);
-       if (ret) {
-               kfree(dmaru);
-               return ret;
-       }
-       dmar_register_drhd_unit(dmaru);
-       return 0;
-}
-
-static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
-{
-       struct acpi_dmar_hardware_unit *drhd;
-       int ret = 0;
-
-       drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
-
-       if (dmaru->include_all)
-               return 0;
-
-       ret = dmar_parse_dev_scope((void *)(drhd + 1),
-                               ((void *)drhd) + drhd->header.length,
-                               &dmaru->devices_cnt, &dmaru->devices,
-                               drhd->segment);
-       if (ret) {
-               list_del(&dmaru->list);
-               kfree(dmaru);
-       }
-       return ret;
-}
-
-#ifdef CONFIG_DMAR
-LIST_HEAD(dmar_rmrr_units);
-
-static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
-{
-       list_add(&rmrr->list, &dmar_rmrr_units);
-}
-
-
-static int __init
-dmar_parse_one_rmrr(struct acpi_dmar_header *header)
-{
-       struct acpi_dmar_reserved_memory *rmrr;
-       struct dmar_rmrr_unit *rmrru;
-
-       rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
-       if (!rmrru)
-               return -ENOMEM;
-
-       rmrru->hdr = header;
-       rmrr = (struct acpi_dmar_reserved_memory *)header;
-       rmrru->base_address = rmrr->base_address;
-       rmrru->end_address = rmrr->end_address;
-
-       dmar_register_rmrr_unit(rmrru);
-       return 0;
-}
-
-static int __init
-rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
-{
-       struct acpi_dmar_reserved_memory *rmrr;
-       int ret;
-
-       rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
-       ret = dmar_parse_dev_scope((void *)(rmrr + 1),
-               ((void *)rmrr) + rmrr->header.length,
-               &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
-
-       if (ret || (rmrru->devices_cnt == 0)) {
-               list_del(&rmrru->list);
-               kfree(rmrru);
-       }
-       return ret;
-}
-
-static LIST_HEAD(dmar_atsr_units);
-
-static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
-{
-       struct acpi_dmar_atsr *atsr;
-       struct dmar_atsr_unit *atsru;
-
-       atsr = container_of(hdr, struct acpi_dmar_atsr, header);
-       atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
-       if (!atsru)
-               return -ENOMEM;
-
-       atsru->hdr = hdr;
-       atsru->include_all = atsr->flags & 0x1;
-
-       list_add(&atsru->list, &dmar_atsr_units);
-
-       return 0;
-}
-
-static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
-{
-       int rc;
-       struct acpi_dmar_atsr *atsr;
-
-       if (atsru->include_all)
-               return 0;
-
-       atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
-       rc = dmar_parse_dev_scope((void *)(atsr + 1),
-                               (void *)atsr + atsr->header.length,
-                               &atsru->devices_cnt, &atsru->devices,
-                               atsr->segment);
-       if (rc || !atsru->devices_cnt) {
-               list_del(&atsru->list);
-               kfree(atsru);
-       }
-
-       return rc;
-}
-
-int dmar_find_matched_atsr_unit(struct pci_dev *dev)
-{
-       int i;
-       struct pci_bus *bus;
-       struct acpi_dmar_atsr *atsr;
-       struct dmar_atsr_unit *atsru;
-
-       dev = pci_physfn(dev);
-
-       list_for_each_entry(atsru, &dmar_atsr_units, list) {
-               atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
-               if (atsr->segment == pci_domain_nr(dev->bus))
-                       goto found;
-       }
-
-       return 0;
-
-found:
-       for (bus = dev->bus; bus; bus = bus->parent) {
-               struct pci_dev *bridge = bus->self;
-
-               if (!bridge || !pci_is_pcie(bridge) ||
-                   bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
-                       return 0;
-
-               if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
-                       for (i = 0; i < atsru->devices_cnt; i++)
-                               if (atsru->devices[i] == bridge)
-                                       return 1;
-                       break;
-               }
-       }
-
-       if (atsru->include_all)
-               return 1;
-
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_ACPI_NUMA
-static int __init
-dmar_parse_one_rhsa(struct acpi_dmar_header *header)
-{
-       struct acpi_dmar_rhsa *rhsa;
-       struct dmar_drhd_unit *drhd;
-
-       rhsa = (struct acpi_dmar_rhsa *)header;
-       for_each_drhd_unit(drhd) {
-               if (drhd->reg_base_addr == rhsa->base_address) {
-                       int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
-
-                       if (!node_online(node))
-                               node = -1;
-                       drhd->iommu->node = node;
-                       return 0;
-               }
-       }
-       WARN_TAINT(
-               1, TAINT_FIRMWARE_WORKAROUND,
-               "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
-               "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-               drhd->reg_base_addr,
-               dmi_get_system_info(DMI_BIOS_VENDOR),
-               dmi_get_system_info(DMI_BIOS_VERSION),
-               dmi_get_system_info(DMI_PRODUCT_VERSION));
-
-       return 0;
-}
-#endif
-
-static void __init
-dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
-{
-       struct acpi_dmar_hardware_unit *drhd;
-       struct acpi_dmar_reserved_memory *rmrr;
-       struct acpi_dmar_atsr *atsr;
-       struct acpi_dmar_rhsa *rhsa;
-
-       switch (header->type) {
-       case ACPI_DMAR_TYPE_HARDWARE_UNIT:
-               drhd = container_of(header, struct acpi_dmar_hardware_unit,
-                                   header);
-               printk (KERN_INFO PREFIX
-                       "DRHD base: %#016Lx flags: %#x\n",
-                       (unsigned long long)drhd->address, drhd->flags);
-               break;
-       case ACPI_DMAR_TYPE_RESERVED_MEMORY:
-               rmrr = container_of(header, struct acpi_dmar_reserved_memory,
-                                   header);
-               printk (KERN_INFO PREFIX
-                       "RMRR base: %#016Lx end: %#016Lx\n",
-                       (unsigned long long)rmrr->base_address,
-                       (unsigned long long)rmrr->end_address);
-               break;
-       case ACPI_DMAR_TYPE_ATSR:
-               atsr = container_of(header, struct acpi_dmar_atsr, header);
-               printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
-               break;
-       case ACPI_DMAR_HARDWARE_AFFINITY:
-               rhsa = container_of(header, struct acpi_dmar_rhsa, header);
-               printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
-                      (unsigned long long)rhsa->base_address,
-                      rhsa->proximity_domain);
-               break;
-       }
-}
-
-/**
- * dmar_table_detect - checks to see if the platform supports DMAR devices
- */
-static int __init dmar_table_detect(void)
-{
-       acpi_status status = AE_OK;
-
-       /* if we can find the DMAR table, then there are DMAR devices */
-       status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
-                               (struct acpi_table_header **)&dmar_tbl,
-                               &dmar_tbl_size);
-
-       if (ACPI_SUCCESS(status) && !dmar_tbl) {
-               printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
-               status = AE_NOT_FOUND;
-       }
-
-       return (ACPI_SUCCESS(status) ? 1 : 0);
-}
-
-/**
- * parse_dmar_table - parses the DMA reporting table
- */
-static int __init
-parse_dmar_table(void)
-{
-       struct acpi_table_dmar *dmar;
-       struct acpi_dmar_header *entry_header;
-       int ret = 0;
-
-       /*
-        * Do it again; the earlier dmar_tbl mapping could have been done with
-        * the fixed map.
-        */
-       dmar_table_detect();
-
-       /*
-        * ACPI tables may not be DMA protected by tboot, so use DMAR copy
-        * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
-        */
-       dmar_tbl = tboot_get_dmar_table(dmar_tbl);
-
-       dmar = (struct acpi_table_dmar *)dmar_tbl;
-       if (!dmar)
-               return -ENODEV;
-
-       if (dmar->width < PAGE_SHIFT - 1) {
-               printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
-               return -EINVAL;
-       }
-
-       printk (KERN_INFO PREFIX "Host address width %d\n",
-               dmar->width + 1);
-
-       entry_header = (struct acpi_dmar_header *)(dmar + 1);
-       while (((unsigned long)entry_header) <
-                       (((unsigned long)dmar) + dmar_tbl->length)) {
-               /* Avoid looping forever on bad ACPI tables */
-               if (entry_header->length == 0) {
-                       printk(KERN_WARNING PREFIX
-                               "Invalid 0-length structure\n");
-                       ret = -EINVAL;
-                       break;
-               }
-
-               dmar_table_print_dmar_entry(entry_header);
-
-               switch (entry_header->type) {
-               case ACPI_DMAR_TYPE_HARDWARE_UNIT:
-                       ret = dmar_parse_one_drhd(entry_header);
-                       break;
-               case ACPI_DMAR_TYPE_RESERVED_MEMORY:
-#ifdef CONFIG_DMAR
-                       ret = dmar_parse_one_rmrr(entry_header);
-#endif
-                       break;
-               case ACPI_DMAR_TYPE_ATSR:
-#ifdef CONFIG_DMAR
-                       ret = dmar_parse_one_atsr(entry_header);
-#endif
-                       break;
-               case ACPI_DMAR_HARDWARE_AFFINITY:
-#ifdef CONFIG_ACPI_NUMA
-                       ret = dmar_parse_one_rhsa(entry_header);
-#endif
-                       break;
-               default:
-                       printk(KERN_WARNING PREFIX
-                               "Unknown DMAR structure type %d\n",
-                               entry_header->type);
-                       ret = 0; /* for forward compatibility */
-                       break;
-               }
-               if (ret)
-                       break;
-
-               entry_header = ((void *)entry_header + entry_header->length);
-       }
-       return ret;
-}
-
-static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
-                         struct pci_dev *dev)
-{
-       int index;
-
-       while (dev) {
-               for (index = 0; index < cnt; index++)
-                       if (dev == devices[index])
-                               return 1;
-
-               /* Check our parent */
-               dev = dev->bus->self;
-       }
-
-       return 0;
-}
-
-struct dmar_drhd_unit *
-dmar_find_matched_drhd_unit(struct pci_dev *dev)
-{
-       struct dmar_drhd_unit *dmaru = NULL;
-       struct acpi_dmar_hardware_unit *drhd;
-
-       dev = pci_physfn(dev);
-
-       list_for_each_entry(dmaru, &dmar_drhd_units, list) {
-               drhd = container_of(dmaru->hdr,
-                                   struct acpi_dmar_hardware_unit,
-                                   header);
-
-               if (dmaru->include_all &&
-                   drhd->segment == pci_domain_nr(dev->bus))
-                       return dmaru;
-
-               if (dmar_pci_device_match(dmaru->devices,
-                                         dmaru->devices_cnt, dev))
-                       return dmaru;
-       }
-
-       return NULL;
-}
-
-int __init dmar_dev_scope_init(void)
-{
-       struct dmar_drhd_unit *drhd, *drhd_n;
-       int ret = -ENODEV;
-
-       list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
-               ret = dmar_parse_dev(drhd);
-               if (ret)
-                       return ret;
-       }
-
-#ifdef CONFIG_DMAR
-       {
-               struct dmar_rmrr_unit *rmrr, *rmrr_n;
-               struct dmar_atsr_unit *atsr, *atsr_n;
-
-               list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
-                       ret = rmrr_parse_dev(rmrr);
-                       if (ret)
-                               return ret;
-               }
-
-               list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
-                       ret = atsr_parse_dev(atsr);
-                       if (ret)
-                               return ret;
-               }
-       }
-#endif
-
-       return ret;
-}
-
-
-int __init dmar_table_init(void)
-{
-       static int dmar_table_initialized;
-       int ret;
-
-       if (dmar_table_initialized)
-               return 0;
-
-       dmar_table_initialized = 1;
-
-       ret = parse_dmar_table();
-       if (ret) {
-               if (ret != -ENODEV)
-                       printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
-               return ret;
-       }
-
-       if (list_empty(&dmar_drhd_units)) {
-               printk(KERN_INFO PREFIX "No DMAR devices found\n");
-               return -ENODEV;
-       }
-
-#ifdef CONFIG_DMAR
-       if (list_empty(&dmar_rmrr_units))
-               printk(KERN_INFO PREFIX "No RMRR found\n");
-
-       if (list_empty(&dmar_atsr_units))
-               printk(KERN_INFO PREFIX "No ATSR found\n");
-#endif
-
-       return 0;
-}
-
-static void warn_invalid_dmar(u64 addr, const char *message)
-{
-       WARN_TAINT_ONCE(
-               1, TAINT_FIRMWARE_WORKAROUND,
-               "Your BIOS is broken; DMAR reported at address %llx%s!\n"
-               "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-               addr, message,
-               dmi_get_system_info(DMI_BIOS_VENDOR),
-               dmi_get_system_info(DMI_BIOS_VERSION),
-               dmi_get_system_info(DMI_PRODUCT_VERSION));
-}
-
-int __init check_zero_address(void)
-{
-       struct acpi_table_dmar *dmar;
-       struct acpi_dmar_header *entry_header;
-       struct acpi_dmar_hardware_unit *drhd;
-
-       dmar = (struct acpi_table_dmar *)dmar_tbl;
-       entry_header = (struct acpi_dmar_header *)(dmar + 1);
-
-       while (((unsigned long)entry_header) <
-                       (((unsigned long)dmar) + dmar_tbl->length)) {
-               /* Avoid looping forever on bad ACPI tables */
-               if (entry_header->length == 0) {
-                       printk(KERN_WARNING PREFIX
-                               "Invalid 0-length structure\n");
-                       return 0;
-               }
-
-               if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
-                       void __iomem *addr;
-                       u64 cap, ecap;
-
-                       drhd = (void *)entry_header;
-                       if (!drhd->address) {
-                               warn_invalid_dmar(0, "");
-                               goto failed;
-                       }
-
-                       addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
-                       if (!addr) {
-                               printk(KERN_ERR "IOMMU: can't validate: %llx\n", drhd->address);
-                               goto failed;
-                       }
-                       cap = dmar_readq(addr + DMAR_CAP_REG);
-                       ecap = dmar_readq(addr + DMAR_ECAP_REG);
-                       early_iounmap(addr, VTD_PAGE_SIZE);
-                       if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
-                               warn_invalid_dmar(drhd->address,
-                                                 " returns all ones");
-                               goto failed;
-                       }
-               }
-
-               entry_header = ((void *)entry_header + entry_header->length);
-       }
-       return 1;
-
-failed:
-#ifdef CONFIG_DMAR
-       dmar_disabled = 1;
-#endif
-       return 0;
-}
-
-int __init detect_intel_iommu(void)
-{
-       int ret;
-
-       ret = dmar_table_detect();
-       if (ret)
-               ret = check_zero_address();
-       {
-#ifdef CONFIG_INTR_REMAP
-               struct acpi_table_dmar *dmar;
-
-               dmar = (struct acpi_table_dmar *) dmar_tbl;
-               if (ret && cpu_has_x2apic && dmar->flags & 0x1)
-                       printk(KERN_INFO
-                              "Queued invalidation will be enabled to support "
-                              "x2apic and Intr-remapping.\n");
-#endif
-#ifdef CONFIG_DMAR
-               if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
-                       iommu_detected = 1;
-                       /* Make sure ACS will be enabled */
-                       pci_request_acs();
-               }
-#endif
-#ifdef CONFIG_X86
-               if (ret)
-                       x86_init.iommu.iommu_init = intel_iommu_init;
-#endif
-       }
-       early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
-       dmar_tbl = NULL;
-
-       return ret ? 1 : -ENODEV;
-}
-
-
-int alloc_iommu(struct dmar_drhd_unit *drhd)
-{
-       struct intel_iommu *iommu;
-       int map_size;
-       u32 ver;
-       static int iommu_allocated = 0;
-       int agaw = 0;
-       int msagaw = 0;
-
-       if (!drhd->reg_base_addr) {
-               warn_invalid_dmar(0, "");
-               return -EINVAL;
-       }
-
-       iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-       if (!iommu)
-               return -ENOMEM;
-
-       iommu->seq_id = iommu_allocated++;
-       sprintf (iommu->name, "dmar%d", iommu->seq_id);
-
-       iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
-       if (!iommu->reg) {
-               printk(KERN_ERR "IOMMU: can't map the region\n");
-               goto error;
-       }
-       iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
-       iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
-       if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
-               warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
-               goto err_unmap;
-       }
-
-#ifdef CONFIG_DMAR
-       agaw = iommu_calculate_agaw(iommu);
-       if (agaw < 0) {
-               printk(KERN_ERR
-                      "Cannot get a valid agaw for iommu (seq_id = %d)\n",
-                      iommu->seq_id);
-               goto err_unmap;
-       }
-       msagaw = iommu_calculate_max_sagaw(iommu);
-       if (msagaw < 0) {
-               printk(KERN_ERR
-                       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
-                       iommu->seq_id);
-               goto err_unmap;
-       }
-#endif
-       iommu->agaw = agaw;
-       iommu->msagaw = msagaw;
-
-       iommu->node = -1;
-
-       /* the registers might be more than one page */
-       map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
-               cap_max_fault_reg_offset(iommu->cap));
-       map_size = VTD_PAGE_ALIGN(map_size);
-       if (map_size > VTD_PAGE_SIZE) {
-               iounmap(iommu->reg);
-               iommu->reg = ioremap(drhd->reg_base_addr, map_size);
-               if (!iommu->reg) {
-                       printk(KERN_ERR "IOMMU: can't map the region\n");
-                       goto error;
-               }
-       }
-
-       ver = readl(iommu->reg + DMAR_VER_REG);
-       pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
-               iommu->seq_id,
-               (unsigned long long)drhd->reg_base_addr,
-               DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
-               (unsigned long long)iommu->cap,
-               (unsigned long long)iommu->ecap);
-
-       spin_lock_init(&iommu->register_lock);
-
-       drhd->iommu = iommu;
-       return 0;
-
- err_unmap:
-       iounmap(iommu->reg);
- error:
-       kfree(iommu);
-       return -1;
-}
-
-void free_iommu(struct intel_iommu *iommu)
-{
-       if (!iommu)
-               return;
-
-#ifdef CONFIG_DMAR
-       free_dmar_iommu(iommu);
-#endif
-
-       if (iommu->reg)
-               iounmap(iommu->reg);
-       kfree(iommu);
-}
-
-/*
- * Reclaim all the submitted descriptors which have completed their work.
- */
-static inline void reclaim_free_desc(struct q_inval *qi)
-{
-       while (qi->desc_status[qi->free_tail] == QI_DONE ||
-              qi->desc_status[qi->free_tail] == QI_ABORT) {
-               qi->desc_status[qi->free_tail] = QI_FREE;
-               qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
-               qi->free_cnt++;
-       }
-}
-
-static int qi_check_fault(struct intel_iommu *iommu, int index)
-{
-       u32 fault;
-       int head, tail;
-       struct q_inval *qi = iommu->qi;
-       int wait_index = (index + 1) % QI_LENGTH;
-
-       if (qi->desc_status[wait_index] == QI_ABORT)
-               return -EAGAIN;
-
-       fault = readl(iommu->reg + DMAR_FSTS_REG);
-
-       /*
-        * If IQE happens, the head points to the descriptor associated
-        * with the error. No new descriptors are fetched until the IQE
-        * is cleared.
-        */
-       if (fault & DMA_FSTS_IQE) {
-               head = readl(iommu->reg + DMAR_IQH_REG);
-               if ((head >> DMAR_IQ_SHIFT) == index) {
-                       printk(KERN_ERR "VT-d detected invalid descriptor: "
-                               "low=%llx, high=%llx\n",
-                               (unsigned long long)qi->desc[index].low,
-                               (unsigned long long)qi->desc[index].high);
-                       memcpy(&qi->desc[index], &qi->desc[wait_index],
-                                       sizeof(struct qi_desc));
-                       __iommu_flush_cache(iommu, &qi->desc[index],
-                                       sizeof(struct qi_desc));
-                       writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
-                       return -EINVAL;
-               }
-       }
-
-       /*
-        * If ITE happens, all pending wait_desc commands are aborted.
-        * No new descriptors are fetched until the ITE is cleared.
-        */
-       if (fault & DMA_FSTS_ITE) {
-               head = readl(iommu->reg + DMAR_IQH_REG);
-               head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
-               head |= 1;
-               tail = readl(iommu->reg + DMAR_IQT_REG);
-               tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
-
-               writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
-
-               do {
-                       if (qi->desc_status[head] == QI_IN_USE)
-                               qi->desc_status[head] = QI_ABORT;
-                       head = (head - 2 + QI_LENGTH) % QI_LENGTH;
-               } while (head != tail);
-
-               if (qi->desc_status[wait_index] == QI_ABORT)
-                       return -EAGAIN;
-       }
-
-       if (fault & DMA_FSTS_ICE)
-               writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
-
-       return 0;
-}
-
-/*
- * Submit the queued invalidation descriptor to the remapping
- * hardware unit and wait for its completion.
- */
-int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
-{
-       int rc;
-       struct q_inval *qi = iommu->qi;
-       struct qi_desc *hw, wait_desc;
-       int wait_index, index;
-       unsigned long flags;
-
-       if (!qi)
-               return 0;
-
-       hw = qi->desc;
-
-restart:
-       rc = 0;
-
-       spin_lock_irqsave(&qi->q_lock, flags);
-       while (qi->free_cnt < 3) {
-               spin_unlock_irqrestore(&qi->q_lock, flags);
-               cpu_relax();
-               spin_lock_irqsave(&qi->q_lock, flags);
-       }
-
-       index = qi->free_head;
-       wait_index = (index + 1) % QI_LENGTH;
-
-       qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
-
-       hw[index] = *desc;
-
-       wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
-                       QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
-       wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
-
-       hw[wait_index] = wait_desc;
-
-       __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
-       __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
-
-       qi->free_head = (qi->free_head + 2) % QI_LENGTH;
-       qi->free_cnt -= 2;
-
-       /*
-        * update the HW tail register indicating the presence of
-        * new descriptors.
-        */
-       writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
-
-       while (qi->desc_status[wait_index] != QI_DONE) {
-               /*
-                * We will leave the interrupts disabled, to prevent the interrupt
-                * context from queueing another cmd while a cmd is already submitted
-                * and waiting for completion on this cpu. This is to avoid
-                * a deadlock where the interrupt context can wait indefinitely
-                * for free slots in the queue.
-                */
-               rc = qi_check_fault(iommu, index);
-               if (rc)
-                       break;
-
-               spin_unlock(&qi->q_lock);
-               cpu_relax();
-               spin_lock(&qi->q_lock);
-       }
-
-       qi->desc_status[index] = QI_DONE;
-
-       reclaim_free_desc(qi);
-       spin_unlock_irqrestore(&qi->q_lock, flags);
-
-       if (rc == -EAGAIN)
-               goto restart;
-
-       return rc;
-}
-
-/*
- * Flush the global interrupt entry cache.
- */
-void qi_global_iec(struct intel_iommu *iommu)
-{
-       struct qi_desc desc;
-
-       desc.low = QI_IEC_TYPE;
-       desc.high = 0;
-
-       /* should never fail */
-       qi_submit_sync(&desc, iommu);
-}
-
-void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
-                     u64 type)
-{
-       struct qi_desc desc;
-
-       desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
-                       | QI_CC_GRAN(type) | QI_CC_TYPE;
-       desc.high = 0;
-
-       qi_submit_sync(&desc, iommu);
-}
-
-void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
-                   unsigned int size_order, u64 type)
-{
-       u8 dw = 0, dr = 0;
-
-       struct qi_desc desc;
-       int ih = 0;
-
-       if (cap_write_drain(iommu->cap))
-               dw = 1;
-
-       if (cap_read_drain(iommu->cap))
-               dr = 1;
-
-       desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
-               | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
-       desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
-               | QI_IOTLB_AM(size_order);
-
-       qi_submit_sync(&desc, iommu);
-}
-
-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
-                       u64 addr, unsigned mask)
-{
-       struct qi_desc desc;
-
-       if (mask) {
-               BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
-               addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
-               desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
-       } else
-               desc.high = QI_DEV_IOTLB_ADDR(addr);
-
-       if (qdep >= QI_DEV_IOTLB_MAX_INVS)
-               qdep = 0;
-
-       desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
-                  QI_DIOTLB_TYPE;
-
-       qi_submit_sync(&desc, iommu);
-}
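
(Editorial worked example of the encoding above.) With VTD_PAGE_SHIFT == 12 and mask == 2 the request spans 2^2 = 4 pages; the base must therefore be 16KiB aligned (the BUG_ON), and OR-ing in the low bits tells the hardware the size via the position of the lowest clear address bit:

        /*
         * mask == 2:
         *   alignment check : addr & 0x3fff must be 0   (16KiB aligned)
         *   addr |= 0x1fff  : lowest clear bit is bit 13 -> 4-page invalidation
         */
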
-
-/*
- * Disable Queued Invalidation interface.
- */
-void dmar_disable_qi(struct intel_iommu *iommu)
-{
-       unsigned long flags;
-       u32 sts;
-       cycles_t start_time = get_cycles();
-
-       if (!ecap_qis(iommu->ecap))
-               return;
-
-       spin_lock_irqsave(&iommu->register_lock, flags);
-
-       sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
-       if (!(sts & DMA_GSTS_QIES))
-               goto end;
-
-       /*
-        * Give a chance to HW to complete the pending invalidation requests.
-        */
-       while ((readl(iommu->reg + DMAR_IQT_REG) !=
-               readl(iommu->reg + DMAR_IQH_REG)) &&
-               (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
-               cpu_relax();
-
-       iommu->gcmd &= ~DMA_GCMD_QIE;
-       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
-       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
-                     !(sts & DMA_GSTS_QIES), sts);
-end:
-       spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-
-/*
- * Enable queued invalidation.
- */
-static void __dmar_enable_qi(struct intel_iommu *iommu)
-{
-       u32 sts;
-       unsigned long flags;
-       struct q_inval *qi = iommu->qi;
-
-       qi->free_head = qi->free_tail = 0;
-       qi->free_cnt = QI_LENGTH;
-
-       spin_lock_irqsave(&iommu->register_lock, flags);
-
-       /* write zero to the tail reg */
-       writel(0, iommu->reg + DMAR_IQT_REG);
-
-       dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
-
-       iommu->gcmd |= DMA_GCMD_QIE;
-       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
-       /* Make sure hardware complete it */
-       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
-
-       spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-
-/*
- * Enable Queued Invalidation interface. This is a must to support
- * interrupt-remapping. Also used by DMA-remapping, which replaces
- * register based IOTLB invalidation.
- */
-int dmar_enable_qi(struct intel_iommu *iommu)
-{
-       struct q_inval *qi;
-       struct page *desc_page;
-
-       if (!ecap_qis(iommu->ecap))
-               return -ENOENT;
-
-       /*
-        * queued invalidation is already setup and enabled.
-        */
-       if (iommu->qi)
-               return 0;
-
-       iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
-       if (!iommu->qi)
-               return -ENOMEM;
-
-       qi = iommu->qi;
-
-
-       desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
-       if (!desc_page) {
-               kfree(qi);
-               iommu->qi = NULL;
-               return -ENOMEM;
-       }
-
-       qi->desc = page_address(desc_page);
-
-       qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
-       if (!qi->desc_status) {
-               free_page((unsigned long) qi->desc);
-               kfree(qi);
-               iommu->qi = NULL;
-               return -ENOMEM;
-       }
-
-       qi->free_head = qi->free_tail = 0;
-       qi->free_cnt = QI_LENGTH;
-
-       spin_lock_init(&qi->q_lock);
-
-       __dmar_enable_qi(iommu);
-
-       return 0;
-}
-
-/* iommu interrupt handling. Most of it is MSI-like. */
-
-enum faulttype {
-       DMA_REMAP,
-       INTR_REMAP,
-       UNKNOWN,
-};
-
-static const char *dma_remap_fault_reasons[] =
-{
-       "Software",
-       "Present bit in root entry is clear",
-       "Present bit in context entry is clear",
-       "Invalid context entry",
-       "Access beyond MGAW",
-       "PTE Write access is not set",
-       "PTE Read access is not set",
-       "Next page table ptr is invalid",
-       "Root table address invalid",
-       "Context table ptr is invalid",
-       "non-zero reserved fields in RTP",
-       "non-zero reserved fields in CTP",
-       "non-zero reserved fields in PTE",
-};
-
-static const char *intr_remap_fault_reasons[] =
-{
-       "Detected reserved fields in the decoded interrupt-remapped request",
-       "Interrupt index exceeded the interrupt-remapping table size",
-       "Present field in the IRTE entry is clear",
-       "Error accessing interrupt-remapping table pointed by IRTA_REG",
-       "Detected reserved fields in the IRTE entry",
-       "Blocked a compatibility format interrupt request",
-       "Blocked an interrupt request due to source-id verification failure",
-};
-
-#define MAX_FAULT_REASON_IDX   (ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
-{
-       if (fault_reason >= 0x20 && (fault_reason < 0x20 +
-                                    ARRAY_SIZE(intr_remap_fault_reasons))) {
-               *fault_type = INTR_REMAP;
-               return intr_remap_fault_reasons[fault_reason - 0x20];
-       } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
-               *fault_type = DMA_REMAP;
-               return dma_remap_fault_reasons[fault_reason];
-       } else {
-               *fault_type = UNKNOWN;
-               return "Unknown";
-       }
-}
-
-void dmar_msi_unmask(struct irq_data *data)
-{
-       struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
-       unsigned long flag;
-
-       /* unmask it */
-       spin_lock_irqsave(&iommu->register_lock, flag);
-       writel(0, iommu->reg + DMAR_FECTL_REG);
-       /* Read a reg to force flush the post write */
-       readl(iommu->reg + DMAR_FECTL_REG);
-       spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_mask(struct irq_data *data)
-{
-       unsigned long flag;
-       struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
-
-       /* mask it */
-       spin_lock_irqsave(&iommu->register_lock, flag);
-       writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
-       /* Read a reg to force flush the post write */
-       readl(iommu->reg + DMAR_FECTL_REG);
-       spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_write(int irq, struct msi_msg *msg)
-{
-       struct intel_iommu *iommu = irq_get_handler_data(irq);
-       unsigned long flag;
-
-       spin_lock_irqsave(&iommu->register_lock, flag);
-       writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
-       writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
-       writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-       spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_read(int irq, struct msi_msg *msg)
-{
-       struct intel_iommu *iommu = irq_get_handler_data(irq);
-       unsigned long flag;
-
-       spin_lock_irqsave(&iommu->register_lock, flag);
-       msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
-       msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
-       msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-       spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
-               u8 fault_reason, u16 source_id, unsigned long long addr)
-{
-       const char *reason;
-       int fault_type;
-
-       reason = dmar_get_fault_reason(fault_reason, &fault_type);
-
-       if (fault_type == INTR_REMAP)
-               printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
-                      "fault index %llx\n"
-                       "INTR-REMAP:[fault reason %02d] %s\n",
-                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
-                       PCI_FUNC(source_id & 0xFF), addr >> 48,
-                       fault_reason, reason);
-       else
-               printk(KERN_ERR
-                      "DMAR:[%s] Request device [%02x:%02x.%d] "
-                      "fault addr %llx\n"
-                      "DMAR:[fault reason %02d] %s\n",
-                      (type ? "DMA Read" : "DMA Write"),
-                      (source_id >> 8), PCI_SLOT(source_id & 0xFF),
-                      PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
-       return 0;
-}
-
-#define PRIMARY_FAULT_REG_LEN (16)
-irqreturn_t dmar_fault(int irq, void *dev_id)
-{
-       struct intel_iommu *iommu = dev_id;
-       int reg, fault_index;
-       u32 fault_status;
-       unsigned long flag;
-
-       spin_lock_irqsave(&iommu->register_lock, flag);
-       fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-       if (fault_status)
-               printk(KERN_ERR "DRHD: handling fault status reg %x\n",
-                      fault_status);
-
-       /* TBD: ignore advanced fault log currently */
-       if (!(fault_status & DMA_FSTS_PPF))
-               goto clear_rest;
-
-       fault_index = dma_fsts_fault_record_index(fault_status);
-       reg = cap_fault_reg_offset(iommu->cap);
-       while (1) {
-               u8 fault_reason;
-               u16 source_id;
-               u64 guest_addr;
-               int type;
-               u32 data;
-
-               /* highest 32 bits */
-               data = readl(iommu->reg + reg +
-                               fault_index * PRIMARY_FAULT_REG_LEN + 12);
-               if (!(data & DMA_FRCD_F))
-                       break;
-
-               fault_reason = dma_frcd_fault_reason(data);
-               type = dma_frcd_type(data);
-
-               data = readl(iommu->reg + reg +
-                               fault_index * PRIMARY_FAULT_REG_LEN + 8);
-               source_id = dma_frcd_source_id(data);
-
-               guest_addr = dmar_readq(iommu->reg + reg +
-                               fault_index * PRIMARY_FAULT_REG_LEN);
-               guest_addr = dma_frcd_page_addr(guest_addr);
-               /* clear the fault */
-               writel(DMA_FRCD_F, iommu->reg + reg +
-                       fault_index * PRIMARY_FAULT_REG_LEN + 12);
-
-               spin_unlock_irqrestore(&iommu->register_lock, flag);
-
-               dmar_fault_do_one(iommu, type, fault_reason,
-                               source_id, guest_addr);
-
-               fault_index++;
-               if (fault_index >= cap_num_fault_regs(iommu->cap))
-                       fault_index = 0;
-               spin_lock_irqsave(&iommu->register_lock, flag);
-       }
-clear_rest:
-       /* clear all the other faults */
-       fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-       writel(fault_status, iommu->reg + DMAR_FSTS_REG);
-
-       spin_unlock_irqrestore(&iommu->register_lock, flag);
-       return IRQ_HANDLED;
-}
-
-int dmar_set_interrupt(struct intel_iommu *iommu)
-{
-       int irq, ret;
-
-       /*
-        * Check if the fault interrupt is already initialized.
-        */
-       if (iommu->irq)
-               return 0;
-
-       irq = create_irq();
-       if (!irq) {
-               printk(KERN_ERR "IOMMU: no free vectors\n");
-               return -EINVAL;
-       }
-
-       irq_set_handler_data(irq, iommu);
-       iommu->irq = irq;
-
-       ret = arch_setup_dmar_msi(irq);
-       if (ret) {
-               irq_set_handler_data(irq, NULL);
-               iommu->irq = 0;
-               destroy_irq(irq);
-               return ret;
-       }
-
-       ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
-       if (ret)
-               printk(KERN_ERR "IOMMU: can't request irq\n");
-       return ret;
-}
-
-int __init enable_drhd_fault_handling(void)
-{
-       struct dmar_drhd_unit *drhd;
-
-       /*
-        * Enable fault control interrupt.
-        */
-       for_each_drhd_unit(drhd) {
-               int ret;
-               struct intel_iommu *iommu = drhd->iommu;
-               ret = dmar_set_interrupt(iommu);
-
-               if (ret) {
-                       printk(KERN_ERR "DRHD %Lx: failed to enable fault "
-                              "interrupt, ret %d\n",
-                              (unsigned long long)drhd->reg_base_addr, ret);
-                       return -1;
-               }
-
-               /*
-                * Clear any previous faults.
-                */
-               dmar_fault(iommu->irq, iommu);
-       }
-
-       return 0;
-}
-
-/*
- * Re-enable Queued Invalidation interface.
- */
-int dmar_reenable_qi(struct intel_iommu *iommu)
-{
-       if (!ecap_qis(iommu->ecap))
-               return -ENOENT;
-
-       if (!iommu->qi)
-               return -ENOENT;
-
-       /*
-        * First disable queued invalidation.
-        */
-       dmar_disable_qi(iommu);
-       /*
-        * Then enable queued invalidation again. Since there is no pending
-        * invalidation requests now, it's safe to re-enable queued
-        * invalidation.
-        */
-       __dmar_enable_qi(iommu);
-
-       return 0;
-}
-
-/*
- * Check interrupt remapping support in DMAR table description.
- */
-int __init dmar_ir_support(void)
-{
-       struct acpi_table_dmar *dmar;
-       dmar = (struct acpi_table_dmar *)dmar_tbl;
-       if (!dmar)
-               return 0;
-       return dmar->flags & 0x1;
-}
-IOMMU_INIT_POST(detect_intel_iommu);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
deleted file mode 100644 (file)
index f02c34d..0000000
+++ /dev/null
@@ -1,4017 +0,0 @@
-/*
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Author: Ashok Raj <ashok.raj@intel.com>
- * Author: Shaohua Li <shaohua.li@intel.com>
- * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- * Author: Fenghua Yu <fenghua.yu@intel.com>
- */
-
-#include <linux/init.h>
-#include <linux/bitmap.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/dmar.h>
-#include <linux/dma-mapping.h>
-#include <linux/mempool.h>
-#include <linux/timer.h>
-#include <linux/iova.h>
-#include <linux/iommu.h>
-#include <linux/intel-iommu.h>
-#include <linux/syscore_ops.h>
-#include <linux/tboot.h>
-#include <linux/dmi.h>
-#include <linux/pci-ats.h>
-#include <asm/cacheflush.h>
-#include <asm/iommu.h>
-#include "pci.h"
-
-#define ROOT_SIZE              VTD_PAGE_SIZE
-#define CONTEXT_SIZE           VTD_PAGE_SIZE
-
-#define IS_BRIDGE_HOST_DEVICE(pdev) \
-                           ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
-#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
-#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
-#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
-
-#define IOAPIC_RANGE_START     (0xfee00000)
-#define IOAPIC_RANGE_END       (0xfeefffff)
-#define IOVA_START_ADDR                (0x1000)
-
-#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
-
-#define MAX_AGAW_WIDTH 64
-
-#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
-#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
-
-/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
-   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
-#define DOMAIN_MAX_PFN(gaw)    ((unsigned long) min_t(uint64_t, \
-                               __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
-#define DOMAIN_MAX_ADDR(gaw)   (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
-
-#define IOVA_PFN(addr)         ((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN          IOVA_PFN(DMA_BIT_MASK(32))
-#define DMA_64BIT_PFN          IOVA_PFN(DMA_BIT_MASK(64))
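
(Editorial arithmetic check, assuming 4KiB pages.) For gaw == 48 the domain can hold 2^36 page frames; on 64-bit this already fits an unsigned long, while on 32-bit DOMAIN_MAX_PFN() clamps it to ULONG_MAX, which is what the comment above is about:

        /*
         * IOVA_PFN(DMA_BIT_MASK(32)) = 0xffffffff >> 12 = 0xfffff  (DMA_32BIT_PFN)
         * __DOMAIN_MAX_PFN(48)       = (1ULL << 36) - 1
         */
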
-
-/* page table handling */
-#define LEVEL_STRIDE           (9)
-#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
-       return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
-       return 30 + agaw * LEVEL_STRIDE;
-}
-
-static inline int width_to_agaw(int width)
-{
-       return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
-       return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
-       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
-       return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
-       return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
-       return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
-static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
-{
-       return  1 << ((lvl - 1) * LEVEL_STRIDE);
-}
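
(Editorial worked example of the level arithmetic.) agaw 2 corresponds to a four-level table and a 48-bit guest address width, and a level-2 entry spans 512 VT-d pages:

        /*
         * agaw_to_level(2)   = 4          page-table levels
         * agaw_to_width(2)   = 30 + 18    = 48 bits
         * level_size(2)      = 1 << 9     = 512
         * lvl_to_nr_pages(2) = 512        (2MiB with 4KiB VT-d pages)
         */
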
-
-/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
-   are never going to work. */
-static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
-{
-       return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-
-static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
-{
-       return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-static inline unsigned long page_to_dma_pfn(struct page *pg)
-{
-       return mm_to_dma_pfn(page_to_pfn(pg));
-}
-static inline unsigned long virt_to_dma_pfn(void *p)
-{
-       return page_to_dma_pfn(virt_to_page(p));
-}
-
-/* global iommu list, set NULL for ignored DMAR units */
-static struct intel_iommu **g_iommus;
-
-static void __init check_tylersburg_isoch(void);
-static int rwbf_quirk;
-
-/*
- * set to 1 to panic the kernel if VT-d cannot be enabled successfully
- * (used when kernel is launched w/ TXT)
- */
-static int force_on = 0;
-
-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
-       u64     val;
-       u64     rsvd1;
-};
-#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
-static inline bool root_present(struct root_entry *root)
-{
-       return (root->val & 1);
-}
-static inline void set_root_present(struct root_entry *root)
-{
-       root->val |= 1;
-}
-static inline void set_root_value(struct root_entry *root, unsigned long value)
-{
-       root->val |= value & VTD_PAGE_MASK;
-}
-
-static inline struct context_entry *
-get_context_addr_from_root(struct root_entry *root)
-{
-       return (struct context_entry *)
-               (root_present(root)?phys_to_virt(
-               root->val & VTD_PAGE_MASK) :
-               NULL);
-}
-
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: avail
- * 8-23: domain id
- */
-struct context_entry {
-       u64 lo;
-       u64 hi;
-};
-
-static inline bool context_present(struct context_entry *context)
-{
-       return (context->lo & 1);
-}
-static inline void context_set_present(struct context_entry *context)
-{
-       context->lo |= 1;
-}
-
-static inline void context_set_fault_enable(struct context_entry *context)
-{
-       context->lo &= (((u64)-1) << 2) | 1;
-}
-
-static inline void context_set_translation_type(struct context_entry *context,
-                                               unsigned long value)
-{
-       context->lo &= (((u64)-1) << 4) | 3;
-       context->lo |= (value & 3) << 2;
-}
-
-static inline void context_set_address_root(struct context_entry *context,
-                                           unsigned long value)
-{
-       context->lo |= value & VTD_PAGE_MASK;
-}
-
-static inline void context_set_address_width(struct context_entry *context,
-                                            unsigned long value)
-{
-       context->hi |= value & 7;
-}
-
-static inline void context_set_domain_id(struct context_entry *context,
-                                        unsigned long value)
-{
-       context->hi |= (value & ((1 << 16) - 1)) << 8;
-}
-
-static inline void context_clear_entry(struct context_entry *context)
-{
-       context->lo = 0;
-       context->hi = 0;
-}
-
-/*
- * 0: readable
- * 1: writable
- * 2-6: reserved
- * 7: super page
- * 8-10: available
- * 11: snoop behavior
 - * 12-63: Host physical address
- */
-struct dma_pte {
-       u64 val;
-};
-
-static inline void dma_clear_pte(struct dma_pte *pte)
-{
-       pte->val = 0;
-}
-
-static inline void dma_set_pte_readable(struct dma_pte *pte)
-{
-       pte->val |= DMA_PTE_READ;
-}
-
-static inline void dma_set_pte_writable(struct dma_pte *pte)
-{
-       pte->val |= DMA_PTE_WRITE;
-}
-
-static inline void dma_set_pte_snp(struct dma_pte *pte)
-{
-       pte->val |= DMA_PTE_SNP;
-}
-
-static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
-{
-       pte->val = (pte->val & ~3) | (prot & 3);
-}
-
-static inline u64 dma_pte_addr(struct dma_pte *pte)
-{
-#ifdef CONFIG_64BIT
-       return pte->val & VTD_PAGE_MASK;
-#else
-       /* Must have a full atomic 64-bit read */
-       return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
-#endif
-}
-
-static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
-{
-       pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
-}
-
-static inline bool dma_pte_present(struct dma_pte *pte)
-{
-       return (pte->val & 3) != 0;
-}
-
-static inline int first_pte_in_page(struct dma_pte *pte)
-{
-       return !((unsigned long)pte & ~VTD_PAGE_MASK);
-}
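-/* A PTE pointer that is VT-d-page aligned is the first entry of a new page
-   table page; the loops below use the helper above to know when to stop
-   batching and flush the CPU cache for the entries just written. */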
-
-/*
- * This domain is a static identity mapping domain.
- *	1. This domain creates a static 1:1 mapping to all usable memory.
- * 	2. It maps to each iommu if successful.
- *	3. Each iommu maps to this domain if successful.
- */
-static struct dmar_domain *si_domain;
-static int hw_pass_through = 1;
-
-/* devices under the same p2p bridge are owned in one domain */
-#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
-
-/* domain represents a virtual machine; more than one device
- * across iommus may be owned in one domain, e.g. a kvm guest.
- */
-#define DOMAIN_FLAG_VIRTUAL_MACHINE    (1 << 1)
-
-/* si_domain contains multiple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY    (1 << 2)
-
-struct dmar_domain {
-       int     id;                     /* domain id */
-       int     nid;                    /* node id */
-       unsigned long iommu_bmp;        /* bitmap of iommus this domain uses */
-
-       struct list_head devices;       /* all devices' list */
-       struct iova_domain iovad;       /* iova's that belong to this domain */
-
-       struct dma_pte  *pgd;           /* virtual address */
-       int             gaw;            /* max guest address width */
-
-       /* adjusted guest address width, 0 is level 2 30-bit */
-       int             agaw;
-
-       int             flags;          /* flags to find out type of domain */
-
-       int             iommu_coherency;/* indicate coherency of iommu access */
-       int             iommu_snooping; /* indicate snooping control feature*/
-       int             iommu_count;    /* reference count of iommu */
-       int             iommu_superpage;/* Level of superpages supported:
-                                          0 == 4KiB (no superpages), 1 == 2MiB,
-                                          2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
-       spinlock_t      iommu_lock;     /* protect iommu set in domain */
-       u64             max_addr;       /* maximum mapped address */
-};
-
-/* PCI domain-device relationship */
-struct device_domain_info {
-       struct list_head link;  /* link to domain siblings */
-       struct list_head global; /* link to global list */
-       int segment;            /* PCI domain */
-       u8 bus;                 /* PCI bus number */
-       u8 devfn;               /* PCI devfn number */
-       struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
-       struct intel_iommu *iommu; /* IOMMU used by this device */
-       struct dmar_domain *domain; /* pointer to domain */
-};
-
-static void flush_unmaps_timeout(unsigned long data);
-
-DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
-
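-/* Lazy unmapping: instead of flushing the IOTLB on every unmap, freed IOVAs
-   are queued here and flushed in batches, either from the timer above or
-   when a queue fills up (HIGH_WATER_MARK entries). */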
-#define HIGH_WATER_MARK 250
-struct deferred_flush_tables {
-       int next;
-       struct iova *iova[HIGH_WATER_MARK];
-       struct dmar_domain *domain[HIGH_WATER_MARK];
-};
-
-static struct deferred_flush_tables *deferred_flush;
-
-/* number of IOMMUs in the system; sizes g_iommus and bounds the
- * per-domain iommu bitmaps */
-static int g_num_of_iommus;
-
-static DEFINE_SPINLOCK(async_umap_flush_lock);
-static LIST_HEAD(unmaps_to_do);
-
-static int timer_on;
-static long list_size;
-
-static void domain_remove_dev_info(struct dmar_domain *domain);
-
-#ifdef CONFIG_DMAR_DEFAULT_ON
-int dmar_disabled = 0;
-#else
-int dmar_disabled = 1;
-#endif /*CONFIG_DMAR_DEFAULT_ON*/
-
-static int dmar_map_gfx = 1;
-static int dmar_forcedac;
-static int intel_iommu_strict;
-static int intel_iommu_superpage = 1;
-
-#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
-static DEFINE_SPINLOCK(device_domain_lock);
-static LIST_HEAD(device_domain_list);
-
-static struct iommu_ops intel_iommu_ops;
-
-static int __init intel_iommu_setup(char *str)
-{
-       if (!str)
-               return -EINVAL;
-       while (*str) {
-               if (!strncmp(str, "on", 2)) {
-                       dmar_disabled = 0;
-                       printk(KERN_INFO "Intel-IOMMU: enabled\n");
-               } else if (!strncmp(str, "off", 3)) {
-                       dmar_disabled = 1;
-                       printk(KERN_INFO "Intel-IOMMU: disabled\n");
-               } else if (!strncmp(str, "igfx_off", 8)) {
-                       dmar_map_gfx = 0;
-                       printk(KERN_INFO
-                               "Intel-IOMMU: disable GFX device mapping\n");
-               } else if (!strncmp(str, "forcedac", 8)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: Forcing DAC for PCI devices\n");
-                       dmar_forcedac = 1;
-               } else if (!strncmp(str, "strict", 6)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: disable batched IOTLB flush\n");
-                       intel_iommu_strict = 1;
-               } else if (!strncmp(str, "sp_off", 6)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: disable supported super page\n");
-                       intel_iommu_superpage = 0;
-               }
-
-               str += strcspn(str, ",");
-               while (*str == ',')
-                       str++;
-       }
-       return 0;
-}
-__setup("intel_iommu=", intel_iommu_setup);
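-
-/* Example: booting with "intel_iommu=on,strict" enables DMA remapping and
-   disables batched (lazy) IOTLB flushing, per the parser above. */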
-
-static struct kmem_cache *iommu_domain_cache;
-static struct kmem_cache *iommu_devinfo_cache;
-static struct kmem_cache *iommu_iova_cache;
-
-static inline void *alloc_pgtable_page(int node)
-{
-       struct page *page;
-       void *vaddr = NULL;
-
-       page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
-       if (page)
-               vaddr = page_address(page);
-       return vaddr;
-}
-
-static inline void free_pgtable_page(void *vaddr)
-{
-       free_page((unsigned long)vaddr);
-}
-
-static inline void *alloc_domain_mem(void)
-{
-       return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
-}
-
-static void free_domain_mem(void *vaddr)
-{
-       kmem_cache_free(iommu_domain_cache, vaddr);
-}
-
-static inline void * alloc_devinfo_mem(void)
-{
-       return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
-}
-
-static inline void free_devinfo_mem(void *vaddr)
-{
-       kmem_cache_free(iommu_devinfo_cache, vaddr);
-}
-
-struct iova *alloc_iova_mem(void)
-{
-       return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
-       kmem_cache_free(iommu_iova_cache, iova);
-}
-
-
-static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
-{
-       unsigned long sagaw;
-       int agaw = -1;
-
-       sagaw = cap_sagaw(iommu->cap);
-       for (agaw = width_to_agaw(max_gaw);
-            agaw >= 0; agaw--) {
-               if (test_bit(agaw, &sagaw))
-                       break;
-       }
-
-       return agaw;
-}
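-/* The AGAW (adjusted guest address width) selects the page-table depth; the
-   loop above picks the widest width not above max_gaw that is set in the
-   hardware's SAGAW capability bitmap. */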
-
-/*
- * Calculate max SAGAW for each iommu.
- */
-int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
-{
-       return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
-}
-
-/*
- * Calculate agaw for each iommu.
- * "SAGAW" may be different across iommus; use the default agaw, and
- * fall back to a smaller supported agaw for iommus that don't support
- * the default agaw.
- */
-int iommu_calculate_agaw(struct intel_iommu *iommu)
-{
-       return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-}
-
-/* This function only returns a single iommu in a domain */
-static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
-{
-       int iommu_id;
-
-       /* si_domain and vm domain should not get here. */
-       BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
-       BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
-
-       iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
-       if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
-               return NULL;
-
-       return g_iommus[iommu_id];
-}
-
-static void domain_update_iommu_coherency(struct dmar_domain *domain)
-{
-       int i;
-
-       domain->iommu_coherency = 1;
-
-       for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
-               if (!ecap_coherent(g_iommus[i]->ecap)) {
-                       domain->iommu_coherency = 0;
-                       break;
-               }
-       }
-}
-
-static void domain_update_iommu_snooping(struct dmar_domain *domain)
-{
-       int i;
-
-       domain->iommu_snooping = 1;
-
-       for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
-               if (!ecap_sc_support(g_iommus[i]->ecap)) {
-                       domain->iommu_snooping = 0;
-                       break;
-               }
-       }
-}
-
-static void domain_update_iommu_superpage(struct dmar_domain *domain)
-{
-       int i, mask = 0xf;
-
-       if (!intel_iommu_superpage) {
-               domain->iommu_superpage = 0;
-               return;
-       }
-
-       domain->iommu_superpage = 4; /* 1TiB */
-
-       for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
-               mask |= cap_super_page_val(g_iommus[i]->cap);
-               if (!mask) {
-                       break;
-               }
-       }
-       domain->iommu_superpage = fls(mask);
-}
-
-/* Some capabilities may be different across iommus */
-static void domain_update_iommu_cap(struct dmar_domain *domain)
-{
-       domain_update_iommu_coherency(domain);
-       domain_update_iommu_snooping(domain);
-       domain_update_iommu_superpage(domain);
-}
-
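-/* Find the IOMMU (DRHD unit) whose device scope covers segment:bus.devfn,
-   either directly, through a bridge whose secondary bus range contains the
-   bus, or via a catch-all INCLUDE_ALL unit. */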
-static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
-{
-       struct dmar_drhd_unit *drhd = NULL;
-       int i;
-
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
-               if (segment != drhd->segment)
-                       continue;
-
-               for (i = 0; i < drhd->devices_cnt; i++) {
-                       if (drhd->devices[i] &&
-                           drhd->devices[i]->bus->number == bus &&
-                           drhd->devices[i]->devfn == devfn)
-                               return drhd->iommu;
-                       if (drhd->devices[i] &&
-                           drhd->devices[i]->subordinate &&
-                           drhd->devices[i]->subordinate->number <= bus &&
-                           drhd->devices[i]->subordinate->subordinate >= bus)
-                               return drhd->iommu;
-               }
-
-               if (drhd->include_all)
-                       return drhd->iommu;
-       }
-
-       return NULL;
-}
-
-static void domain_flush_cache(struct dmar_domain *domain,
-                              void *addr, int size)
-{
-       if (!domain->iommu_coherency)
-               clflush_cache_range(addr, size);
-}
-
-/* Gets context entry for a given bus and devfn */
-static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
-               u8 bus, u8 devfn)
-{
-       struct root_entry *root;
-       struct context_entry *context;
-       unsigned long phy_addr;
-       unsigned long flags;
-
-       spin_lock_irqsave(&iommu->lock, flags);
-       root = &iommu->root_entry[bus];
-       context = get_context_addr_from_root(root);
-       if (!context) {
-               context = (struct context_entry *)
-                               alloc_pgtable_page(iommu->node);
-               if (!context) {
-                       spin_unlock_irqrestore(&iommu->lock, flags);
-                       return NULL;
-               }
-               __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
-               phy_addr = virt_to_phys((void *)context);
-               set_root_value(root, phy_addr);
-               set_root_present(root);
-               __iommu_flush_cache(iommu, root, sizeof(*root));
-       }
-       spin_unlock_irqrestore(&iommu->lock, flags);
-       return &context[devfn];
-}
-
-static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
-       struct root_entry *root;
-       struct context_entry *context;
-       int ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&iommu->lock, flags);
-       root = &iommu->root_entry[bus];
-       context = get_context_addr_from_root(root);
-       if (!context) {
-               ret = 0;
-               goto out;
-       }
-       ret = context_present(&context[devfn]);
-out:
-       spin_unlock_irqrestore(&iommu->lock, flags);
-       return ret;
-}
-
-static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
-       struct root_entry *root;
-       struct context_entry *context;
-       unsigned long flags;
-
-       spin_lock_irqsave(&iommu->lock, flags);
-       root = &iommu->root_entry[bus];
-       context = get_context_addr_from_root(root);
-       if (context) {
-               context_clear_entry(&context[devfn]);
-               __iommu_flush_cache(iommu, &context[devfn], \
-                       sizeof(*context));
-       }
-       spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void free_context_table(struct intel_iommu *iommu)
-{
-       struct root_entry *root;
-       int i;
-       unsigned long flags;
-       struct context_entry *context;
-
-       spin_lock_irqsave(&iommu->lock, flags);
-       if (!iommu->root_entry) {
-               goto out;
-       }
-       for (i = 0; i < ROOT_ENTRY_NR; i++) {
-               root = &iommu->root_entry[i];
-               context = get_context_addr_from_root(root);
-               if (context)
-                       free_pgtable_page(context);
-       }
-       free_pgtable_page(iommu->root_entry);
-       iommu->root_entry = NULL;
-out:
-       spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
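-/* Walk the page tables for @pfn, allocating any missing intermediate levels
-   on the way down; stop at the requested (super)page level, or earlier if an
-   existing large-page PTE is hit, and return a pointer to that PTE. */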
-static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-                                     unsigned long pfn, int large_level)
-{
-       int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-       struct dma_pte *parent, *pte = NULL;
-       int level = agaw_to_level(domain->agaw);
-       int offset, target_level;
-
-       BUG_ON(!domain->pgd);
-       BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
-       parent = domain->pgd;
-
-       /* Search pte */
-       if (!large_level)
-               target_level = 1;
-       else
-               target_level = large_level;
-
-       while (level > 0) {
-               void *tmp_page;
-
-               offset = pfn_level_offset(pfn, level);
-               pte = &parent[offset];
-               if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
-                       break;
-               if (level == target_level)
-                       break;
-
-               if (!dma_pte_present(pte)) {
-                       uint64_t pteval;
-
-                       tmp_page = alloc_pgtable_page(domain->nid);
-
-                       if (!tmp_page)
-                               return NULL;
-
-                       domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
-                       pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
-                       if (cmpxchg64(&pte->val, 0ULL, pteval)) {
-                               /* Someone else set it while we were thinking; use theirs. */
-                               free_pgtable_page(tmp_page);
-                       } else {
-                               dma_pte_addr(pte);
-                               domain_flush_cache(domain, pte, sizeof(*pte));
-                       }
-               }
-               parent = phys_to_virt(dma_pte_addr(pte));
-               level--;
-       }
-
-       return pte;
-}
-
-
-/* return the pte for an address at a specific level */
-static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
-                                        unsigned long pfn,
-                                        int level, int *large_page)
-{
-       struct dma_pte *parent, *pte = NULL;
-       int total = agaw_to_level(domain->agaw);
-       int offset;
-
-       parent = domain->pgd;
-       while (level <= total) {
-               offset = pfn_level_offset(pfn, total);
-               pte = &parent[offset];
-               if (level == total)
-                       return pte;
-
-               if (!dma_pte_present(pte)) {
-                       *large_page = total;
-                       break;
-               }
-
-               if (pte->val & DMA_PTE_LARGE_PAGE) {
-                       *large_page = total;
-                       return pte;
-               }
-
-               parent = phys_to_virt(dma_pte_addr(pte));
-               total--;
-       }
-       return NULL;
-}
-
-/* clear last level pte; a tlb flush should follow */
-static void dma_pte_clear_range(struct dmar_domain *domain,
-                               unsigned long start_pfn,
-                               unsigned long last_pfn)
-{
-       int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-       unsigned int large_page = 1;
-       struct dma_pte *first_pte, *pte;
-
-       BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
-       BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
-       BUG_ON(start_pfn > last_pfn);
-
-       /* we don't need lock here; nobody else touches the iova range */
-       do {
-               large_page = 1;
-               first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
-               if (!pte) {
-                       start_pfn = align_to_level(start_pfn + 1, large_page + 1);
-                       continue;
-               }
-               do {
-                       dma_clear_pte(pte);
-                       start_pfn += lvl_to_nr_pages(large_page);
-                       pte++;
-               } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
-
-               domain_flush_cache(domain, first_pte,
-                                  (void *)pte - (void *)first_pte);
-
-       } while (start_pfn && start_pfn <= last_pfn);
-}
-
-/* free page table pages. last level pte should already be cleared */
-static void dma_pte_free_pagetable(struct dmar_domain *domain,
-                                  unsigned long start_pfn,
-                                  unsigned long last_pfn)
-{
-       int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-       struct dma_pte *first_pte, *pte;
-       int total = agaw_to_level(domain->agaw);
-       int level;
-       unsigned long tmp;
-       int large_page = 2;
-
-       BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
-       BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
-       BUG_ON(start_pfn > last_pfn);
-
-       /* We don't need lock here; nobody else touches the iova range */
-       level = 2;
-       while (level <= total) {
-               tmp = align_to_level(start_pfn, level);
-
-               /* If we can't even clear one PTE at this level, we're done */
-               if (tmp + level_size(level) - 1 > last_pfn)
-                       return;
-
-               do {
-                       large_page = level;
-                       first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
-                       if (large_page > level)
-                               level = large_page + 1;
-                       if (!pte) {
-                               tmp = align_to_level(tmp + 1, level + 1);
-                               continue;
-                       }
-                       do {
-                               if (dma_pte_present(pte)) {
-                                       free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
-                                       dma_clear_pte(pte);
-                               }
-                               pte++;
-                               tmp += level_size(level);
-                       } while (!first_pte_in_page(pte) &&
-                                tmp + level_size(level) - 1 <= last_pfn);
-
-                       domain_flush_cache(domain, first_pte,
-                                          (void *)pte - (void *)first_pte);
-                       
-               } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
-               level++;
-       }
-       /* free pgd */
-       if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
-               free_pgtable_page(domain->pgd);
-               domain->pgd = NULL;
-       }
-}
-
-/* iommu handling */
-static int iommu_alloc_root_entry(struct intel_iommu *iommu)
-{
-       struct root_entry *root;
-       unsigned long flags;
-
-       root = (struct root_entry *)alloc_pgtable_page(iommu->node);
-       if (!root)
-               return -ENOMEM;
-
-       __iommu_flush_cache(iommu, root, ROOT_SIZE);
-
-       spin_lock_irqsave(&iommu->lock, flags);
-       iommu->root_entry = root;
-       spin_unlock_irqrestore(&iommu->lock, flags);
-
-       return 0;
-}
-
-static void iommu_set_root_entry(struct intel_iommu *iommu)
-{
-       void *addr;
-       u32 sts;
-       unsigned long flag;
-
-       addr = iommu->root_entry;
-
-       spin_lock_irqsave(&iommu->register_lock, flag);
-       dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
-
-       writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
-
-       /* Make sure hardware complete it */
-       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
-                     readl, (sts & DMA_GSTS_RTPS), sts);
-
-       spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-static void iommu_flush_write_buffer(struct intel_iommu *iommu)
-{
-       u32 val;
-       unsigned long flag;
-
-       if (!rwbf_quirk && !cap_rwbf(iommu->cap))
-               return;
-
-       spin_lock_irqsave(&iommu->register_lock, flag);
-       writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
-
-       /* Make sure hardware complete it */
-       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
-                     readl, (!(val & DMA_GSTS_WBFS)), val);
-
-       spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-/* return value determines if we need a write buffer flush */
-static void __iommu_flush_context(struct intel_iommu *iommu,
-                                 u16 did, u16 source_id, u8 function_mask,
-                                 u64 type)
-{
-       u64 val = 0;
-       unsigned long flag;
-
-       switch (type) {
-       case DMA_CCMD_GLOBAL_INVL:
-               val = DMA_CCMD_GLOBAL_INVL;
-               break;
-       case DMA_CCMD_DOMAIN_INVL:
-               val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
-               break;
-       case DMA_CCMD_DEVICE_INVL:
-               val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
-                       | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
-               break;
-       default:
-               BUG();
-       }
-       val |= DMA_CCMD_ICC;
-
-       spin_lock_irqsave(&iommu->register_lock, flag);
-       dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
-
-       /* Make sure hardware complete it */
-       IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
-               dmar_readq, (!(val & DMA_CCMD_ICC)), val);
-
-       spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-/* return value determines if we need a write buffer flush */
-static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
-                               u64 addr, unsigned int size_order, u64 type)
-{
-       int tlb_offset = ecap_iotlb_offset(iommu->ecap);
-       u64 val = 0, val_iva = 0;
-       unsigned long flag;
-
-       switch (type) {
-       case DMA_TLB_GLOBAL_FLUSH:
-               /* global flush doesn't need set IVA_REG */
-               val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
-               break;
-       case DMA_TLB_DSI_FLUSH:
-               val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
-               break;
-       case DMA_TLB_PSI_FLUSH:
-               val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
-               /* Note: always flush non-leaf currently */
-               val_iva = size_order | addr;
-               break;
-       default:
-               BUG();
-       }
-       /* Note: set drain read/write */
-#if 0
-       /*
-        * This is probably only needed to be extra safe; it looks like we
-        * can ignore it without any impact.
-        */
-       if (cap_read_drain(iommu->cap))
-               val |= DMA_TLB_READ_DRAIN;
-#endif
-       if (cap_write_drain(iommu->cap))
-               val |= DMA_TLB_WRITE_DRAIN;
-
-       spin_lock_irqsave(&iommu->register_lock, flag);
-       /* Note: Only uses first TLB reg currently */
-       if (val_iva)
-               dmar_writeq(iommu->reg + tlb_offset, val_iva);
-       dmar_writeq(iommu->reg + tlb_offset + 8, val);
-
-       /* Make sure hardware complete it */
-       IOMMU_WAIT_OP(iommu, tlb_offset + 8,
-               dmar_readq, (!(val & DMA_TLB_IVT)), val);
-
-       spin_unlock_irqrestore(&iommu->register_lock, flag);
-
-       /* check IOTLB invalidation granularity */
-       if (DMA_TLB_IAIG(val) == 0)
-               printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
-       if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
-               pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
-                       (unsigned long long)DMA_TLB_IIRG(type),
-                       (unsigned long long)DMA_TLB_IAIG(val));
-}
-
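-/* Device-IOTLB (PCIe ATS): if both the IOMMU and the endpoint support it,
-   the device may cache translations itself, and those cached entries must be
-   invalidated separately via queued invalidation (see qi_flush_dev_iotlb
-   below). */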
-static struct device_domain_info *iommu_support_dev_iotlb(
-       struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
-{
-       int found = 0;
-       unsigned long flags;
-       struct device_domain_info *info;
-       struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
-
-       if (!ecap_dev_iotlb_support(iommu->ecap))
-               return NULL;
-
-       if (!iommu->qi)
-               return NULL;
-
-       spin_lock_irqsave(&device_domain_lock, flags);
-       list_for_each_entry(info, &domain->devices, link)
-               if (info->bus == bus && info->devfn == devfn) {
-                       found = 1;
-                       break;
-               }
-       spin_unlock_irqrestore(&device_domain_lock, flags);
-
-       if (!found || !info->dev)
-               return NULL;
-
-       if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
-               return NULL;
-
-       if (!dmar_find_matched_atsr_unit(info->dev))
-               return NULL;
-
-       info->iommu = iommu;
-
-       return info;
-}
-
-static void iommu_enable_dev_iotlb(struct device_domain_info *info)
-{
-       if (!info)
-               return;
-
-       pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
-}
-
-static void iommu_disable_dev_iotlb(struct device_domain_info *info)
-{
-       if (!info->dev || !pci_ats_enabled(info->dev))
-               return;
-
-       pci_disable_ats(info->dev);
-}
-
-static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
-                                 u64 addr, unsigned mask)
-{
-       u16 sid, qdep;
-       unsigned long flags;
-       struct device_domain_info *info;
-
-       spin_lock_irqsave(&device_domain_lock, flags);
-       list_for_each_entry(info, &domain->devices, link) {
-               if (!info->dev || !pci_ats_enabled(info->dev))
-                       continue;
-
-               sid = info->bus << 8 | info->devfn;
-               qdep = pci_ats_queue_depth(info->dev);
-               qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
-       }
-       spin_unlock_irqrestore(&device_domain_lock, flags);
-}
-
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-                                 unsigned long pfn, unsigned int pages, int map)
-{
-       unsigned int mask = ilog2(__roundup_pow_of_two(pages));
-       uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
-
-       BUG_ON(pages == 0);
-
-       /*
-        * Fallback to domain selective flush if no PSI support or the size is
-        * too big.
-        * PSI requires page size to be 2 ^ x, and the base address is naturally
-        * aligned to the size
-        */
-       if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
-               iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                                               DMA_TLB_DSI_FLUSH);
-       else
-               iommu->flush.flush_iotlb(iommu, did, addr, mask,
-                                               DMA_TLB_PSI_FLUSH);
-
-       /*
-        * In caching mode, changes of pages from non-present to present require a
-        * flush. However, device IOTLB doesn't need to be flushed in this case.
-        */
-       if (!cap_caching_mode(iommu->cap) || !map)
-               iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
-}
-
-static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
-{
-       u32 pmen;
-       unsigned long flags;
-
-       spin_lock_irqsave(&iommu->register_lock, flags);
-       pmen = readl(iommu->reg + DMAR_PMEN_REG);
-       pmen &= ~DMA_PMEN_EPM;
-       writel(pmen, iommu->reg + DMAR_PMEN_REG);
-
-       /* wait for the protected region status bit to clear */
-       IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
-               readl, !(pmen & DMA_PMEN_PRS), pmen);
-
-       spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-
-static int iommu_enable_translation(struct intel_iommu *iommu)
-{
-       u32 sts;
-       unsigned long flags;
-
-       spin_lock_irqsave(&iommu->register_lock, flags);
-       iommu->gcmd |= DMA_GCMD_TE;
-       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
-       /* Make sure hardware complete it */
-       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
-                     readl, (sts & DMA_GSTS_TES), sts);
-
-       spin_unlock_irqrestore(&iommu->register_lock, flags);
-       return 0;
-}
-
-static int iommu_disable_translation(struct intel_iommu *iommu)
-{
-       u32 sts;
-       unsigned long flag;
-
-       spin_lock_irqsave(&iommu->register_lock, flag);
-       iommu->gcmd &= ~DMA_GCMD_TE;
-       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
-       /* Make sure hardware complete it */
-       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
-                     readl, (!(sts & DMA_GSTS_TES)), sts);
-
-       spin_unlock_irqrestore(&iommu->register_lock, flag);
-       return 0;
-}
-
-
-static int iommu_init_domains(struct intel_iommu *iommu)
-{
-       unsigned long ndomains;
-       unsigned long nlongs;
-
-       ndomains = cap_ndoms(iommu->cap);
-       pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
-                       ndomains);
-       nlongs = BITS_TO_LONGS(ndomains);
-
-       spin_lock_init(&iommu->lock);
-
-       /* TBD: there might be 64K domains,
-        * consider other allocation for future chip
-        */
-       iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
-       if (!iommu->domain_ids) {
-               printk(KERN_ERR "Allocating domain id array failed\n");
-               return -ENOMEM;
-       }
-       iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
-                       GFP_KERNEL);
-       if (!iommu->domains) {
-               printk(KERN_ERR "Allocating domain array failed\n");
-               return -ENOMEM;
-       }
-
-       /*
-        * if Caching mode is set, then invalid translations are tagged
-        * with domainid 0. Hence we need to pre-allocate it.
-        */
-       if (cap_caching_mode(iommu->cap))
-               set_bit(0, iommu->domain_ids);
-       return 0;
-}
-
-
-static void domain_exit(struct dmar_domain *domain);
-static void vm_domain_exit(struct dmar_domain *domain);
-
-void free_dmar_iommu(struct intel_iommu *iommu)
-{
-       struct dmar_domain *domain;
-       int i;
-       unsigned long flags;
-
-       if ((iommu->domains) && (iommu->domain_ids)) {
-               for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
-                       domain = iommu->domains[i];
-                       clear_bit(i, iommu->domain_ids);
-
-                       spin_lock_irqsave(&domain->iommu_lock, flags);
-                       if (--domain->iommu_count == 0) {
-                               if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
-                                       vm_domain_exit(domain);
-                               else
-                                       domain_exit(domain);
-                       }
-                       spin_unlock_irqrestore(&domain->iommu_lock, flags);
-               }
-       }
-
-       if (iommu->gcmd & DMA_GCMD_TE)
-               iommu_disable_translation(iommu);
-
-       if (iommu->irq) {
-               irq_set_handler_data(iommu->irq, NULL);
-               /* This will mask the irq */
-               free_irq(iommu->irq, iommu);
-               destroy_irq(iommu->irq);
-       }
-
-       kfree(iommu->domains);
-       kfree(iommu->domain_ids);
-
-       g_iommus[iommu->seq_id] = NULL;
-
-       /* if all iommus are freed, free g_iommus */
-       for (i = 0; i < g_num_of_iommus; i++) {
-               if (g_iommus[i])
-                       break;
-       }
-
-       if (i == g_num_of_iommus)
-               kfree(g_iommus);
-
-       /* free context mapping */
-       free_context_table(iommu);
-}
-
-static struct dmar_domain *alloc_domain(void)
-{
-       struct dmar_domain *domain;
-
-       domain = alloc_domain_mem();
-       if (!domain)
-               return NULL;
-
-       domain->nid = -1;
-       memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
-       domain->flags = 0;
-
-       return domain;
-}
-
-static int iommu_attach_domain(struct dmar_domain *domain,
-                              struct intel_iommu *iommu)
-{
-       int num;
-       unsigned long ndomains;
-       unsigned long flags;
-
-       ndomains = cap_ndoms(iommu->cap);
-
-       spin_lock_irqsave(&iommu->lock, flags);
-
-       num = find_first_zero_bit(iommu->domain_ids, ndomains);
-       if (num >= ndomains) {
-               spin_unlock_irqrestore(&iommu->lock, flags);
-               printk(KERN_ERR "IOMMU: no free domain ids\n");
-               return -ENOMEM;
-       }
-
-       domain->id = num;
-       set_bit(num, iommu->domain_ids);
-       set_bit(iommu->seq_id, &domain->iommu_bmp);
-       iommu->domains[num] = domain;
-       spin_unlock_irqrestore(&iommu->lock, flags);
-
-       return 0;
-}
-
-static void iommu_detach_domain(struct dmar_domain *domain,
-                               struct intel_iommu *iommu)
-{
-       unsigned long flags;
-       int num, ndomains;
-       int found = 0;
-
-       spin_lock_irqsave(&iommu->lock, flags);
-       ndomains = cap_ndoms(iommu->cap);
-       for_each_set_bit(num, iommu->domain_ids, ndomains) {
-               if (iommu->domains[num] == domain) {
-                       found = 1;
-                       break;
-               }
-       }
-
-       if (found) {
-               clear_bit(num, iommu->domain_ids);
-               clear_bit(iommu->seq_id, &domain->iommu_bmp);
-               iommu->domains[num] = NULL;
-       }
-       spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_rbtree_key;
-
-static int dmar_init_reserved_ranges(void)
-{
-       struct pci_dev *pdev = NULL;
-       struct iova *iova;
-       int i;
-
-       init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
-
-       lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
-               &reserved_rbtree_key);
-
-       /* IOAPIC ranges shouldn't be accessed by DMA */
-       iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
-               IOVA_PFN(IOAPIC_RANGE_END));
-       if (!iova) {
-               printk(KERN_ERR "Reserve IOAPIC range failed\n");
-               return -ENODEV;
-       }
-
-       /* Reserve all PCI MMIO to avoid peer-to-peer access */
-       for_each_pci_dev(pdev) {
-               struct resource *r;
-
-               for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-                       r = &pdev->resource[i];
-                       if (!r->flags || !(r->flags & IORESOURCE_MEM))
-                               continue;
-                       iova = reserve_iova(&reserved_iova_list,
-                                           IOVA_PFN(r->start),
-                                           IOVA_PFN(r->end));
-                       if (!iova) {
-                               printk(KERN_ERR "Reserve iova failed\n");
-                               return -ENODEV;
-                       }
-               }
-       }
-       return 0;
-}
-
-static void domain_reserve_special_ranges(struct dmar_domain *domain)
-{
-       copy_reserved_iova(&reserved_iova_list, &domain->iovad);
-}
-
-static inline int guestwidth_to_adjustwidth(int gaw)
-{
-       int agaw;
-       int r = (gaw - 12) % 9;
-
-       if (r == 0)
-               agaw = gaw;
-       else
-               agaw = gaw + 9 - r;
-       if (agaw > 64)
-               agaw = 64;
-       return agaw;
-}
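-
-/* Example for the helper above: a 39- or 48-bit guest width is already of
-   the form 12 + 9*n and is returned unchanged, while e.g. 40 bits is rounded
-   up to 48. */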
-
-static int domain_init(struct dmar_domain *domain, int guest_width)
-{
-       struct intel_iommu *iommu;
-       int adjust_width, agaw;
-       unsigned long sagaw;
-
-       init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
-       spin_lock_init(&domain->iommu_lock);
-
-       domain_reserve_special_ranges(domain);
-
-       /* calculate AGAW */
-       iommu = domain_get_iommu(domain);
-       if (guest_width > cap_mgaw(iommu->cap))
-               guest_width = cap_mgaw(iommu->cap);
-       domain->gaw = guest_width;
-       adjust_width = guestwidth_to_adjustwidth(guest_width);
-       agaw = width_to_agaw(adjust_width);
-       sagaw = cap_sagaw(iommu->cap);
-       if (!test_bit(agaw, &sagaw)) {
-               /* hardware doesn't support it, choose a bigger one */
-               pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
-               agaw = find_next_bit(&sagaw, 5, agaw);
-               if (agaw >= 5)
-                       return -ENODEV;
-       }
-       domain->agaw = agaw;
-       INIT_LIST_HEAD(&domain->devices);
-
-       if (ecap_coherent(iommu->ecap))
-               domain->iommu_coherency = 1;
-       else
-               domain->iommu_coherency = 0;
-
-       if (ecap_sc_support(iommu->ecap))
-               domain->iommu_snooping = 1;
-       else
-               domain->iommu_snooping = 0;
-
-       domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
-       domain->iommu_count = 1;
-       domain->nid = iommu->node;
-
-       /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
-       if (!domain->pgd)
-               return -ENOMEM;
-       __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
-       return 0;
-}
-
-static void domain_exit(struct dmar_domain *domain)
-{
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
-
-       /* Domain 0 is reserved, so don't process it */
-       if (!domain)
-               return;
-
-       /* Flush any lazy unmaps that may reference this domain */
-       if (!intel_iommu_strict)
-               flush_unmaps_timeout(0);
-
-       domain_remove_dev_info(domain);
-       /* destroy iovas */
-       put_iova_domain(&domain->iovad);
-
-       /* clear ptes */
-       dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
-       /* free page tables */
-       dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
-       for_each_active_iommu(iommu, drhd)
-               if (test_bit(iommu->seq_id, &domain->iommu_bmp))
-                       iommu_detach_domain(domain, iommu);
-
-       free_domain_mem(domain);
-}
-
-static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
-                                u8 bus, u8 devfn, int translation)
-{
-       struct context_entry *context;
-       unsigned long flags;
-       struct intel_iommu *iommu;
-       struct dma_pte *pgd;
-       unsigned long num;
-       unsigned long ndomains;
-       int id;
-       int agaw;
-       struct device_domain_info *info = NULL;
-
-       pr_debug("Set context mapping for %02x:%02x.%d\n",
-               bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
-
-       BUG_ON(!domain->pgd);
-       BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
-              translation != CONTEXT_TT_MULTI_LEVEL);
-
-       iommu = device_to_iommu(segment, bus, devfn);
-       if (!iommu)
-               return -ENODEV;
-
-       context = device_to_context_entry(iommu, bus, devfn);
-       if (!context)
-               return -ENOMEM;
-       spin_lock_irqsave(&iommu->lock, flags);
-       if (context_present(context)) {
-               spin_unlock_irqrestore(&iommu->lock, flags);
-               return 0;
-       }
-
-       id = domain->id;
-       pgd = domain->pgd;
-
-       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
-           domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
-               int found = 0;
-
-               /* find an available domain id for this device in iommu */
-               ndomains = cap_ndoms(iommu->cap);
-               for_each_set_bit(num, iommu->domain_ids, ndomains) {
-                       if (iommu->domains[num] == domain) {
-                               id = num;
-                               found = 1;
-                               break;
-                       }
-               }
-
-               if (found == 0) {
-                       num = find_first_zero_bit(iommu->domain_ids, ndomains);
-                       if (num >= ndomains) {
-                               spin_unlock_irqrestore(&iommu->lock, flags);
-                               printk(KERN_ERR "IOMMU: no free domain ids\n");
-                               return -EFAULT;
-                       }
-
-                       set_bit(num, iommu->domain_ids);
-                       iommu->domains[num] = domain;
-                       id = num;
-               }
-
-               /* Skip top levels of page tables for
-                * iommu which has less agaw than default.
-                * Unnecessary for PT mode.
-                */
-               if (translation != CONTEXT_TT_PASS_THROUGH) {
-                       for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
-                               pgd = phys_to_virt(dma_pte_addr(pgd));
-                               if (!dma_pte_present(pgd)) {
-                                       spin_unlock_irqrestore(&iommu->lock, flags);
-                                       return -ENOMEM;
-                               }
-                       }
-               }
-       }
-
-       context_set_domain_id(context, id);
-
-       if (translation != CONTEXT_TT_PASS_THROUGH) {
-               info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
-               translation = info ? CONTEXT_TT_DEV_IOTLB :
-                                    CONTEXT_TT_MULTI_LEVEL;
-       }
-       /*
-        * In pass through mode, AW must be programmed to indicate the largest
-        * AGAW value supported by hardware. And ASR is ignored by hardware.
-        */
-       if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
-               context_set_address_width(context, iommu->msagaw);
-       else {
-               context_set_address_root(context, virt_to_phys(pgd));
-               context_set_address_width(context, iommu->agaw);
-       }
-
-       context_set_translation_type(context, translation);
-       context_set_fault_enable(context);
-       context_set_present(context);
-       domain_flush_cache(domain, context, sizeof(*context));
-
-       /*
-        * It's a non-present to present mapping. If hardware doesn't cache
-        * non-present entries we only need to flush the write-buffer. If it
-        * _does_ cache non-present entries, then it does so in the special
-        * domain #0, which we have to flush:
-        */
-       if (cap_caching_mode(iommu->cap)) {
-               iommu->flush.flush_context(iommu, 0,
-                                          (((u16)bus) << 8) | devfn,
-                                          DMA_CCMD_MASK_NOBIT,
-                                          DMA_CCMD_DEVICE_INVL);
-               iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
-       } else {
-               iommu_flush_write_buffer(iommu);
-       }
-       iommu_enable_dev_iotlb(info);
-       spin_unlock_irqrestore(&iommu->lock, flags);
-
-       spin_lock_irqsave(&domain->iommu_lock, flags);
-       if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
-               domain->iommu_count++;
-               if (domain->iommu_count == 1)
-                       domain->nid = iommu->node;
-               domain_update_iommu_cap(domain);
-       }
-       spin_unlock_irqrestore(&domain->iommu_lock, flags);
-       return 0;
-}
-
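-/* Program context entries not just for the device itself but also for every
-   bridge on the path up to the root, since DMA from devices behind a
-   PCIe-to-PCI bridge may be tagged with the bridge's source-id. */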
-static int
-domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
-                       int translation)
-{
-       int ret;
-       struct pci_dev *tmp, *parent;
-
-       ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
-                                        pdev->bus->number, pdev->devfn,
-                                        translation);
-       if (ret)
-               return ret;
-
-       /* dependent device mapping */
-       tmp = pci_find_upstream_pcie_bridge(pdev);
-       if (!tmp)
-               return 0;
-       /* Secondary interface's bus number and devfn 0 */
-       parent = pdev->bus->self;
-       while (parent != tmp) {
-               ret = domain_context_mapping_one(domain,
-                                                pci_domain_nr(parent->bus),
-                                                parent->bus->number,
-                                                parent->devfn, translation);
-               if (ret)
-                       return ret;
-               parent = parent->bus->self;
-       }
-       if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
-               return domain_context_mapping_one(domain,
-                                       pci_domain_nr(tmp->subordinate),
-                                       tmp->subordinate->number, 0,
-                                       translation);
-       else /* this is a legacy PCI bridge */
-               return domain_context_mapping_one(domain,
-                                                 pci_domain_nr(tmp->bus),
-                                                 tmp->bus->number,
-                                                 tmp->devfn,
-                                                 translation);
-}
-
-static int domain_context_mapped(struct pci_dev *pdev)
-{
-       int ret;
-       struct pci_dev *tmp, *parent;
-       struct intel_iommu *iommu;
-
-       iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
-                               pdev->devfn);
-       if (!iommu)
-               return -ENODEV;
-
-       ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
-       if (!ret)
-               return ret;
-       /* dependent device mapping */
-       tmp = pci_find_upstream_pcie_bridge(pdev);
-       if (!tmp)
-               return ret;
-       /* Secondary interface's bus number and devfn 0 */
-       parent = pdev->bus->self;
-       while (parent != tmp) {
-               ret = device_context_mapped(iommu, parent->bus->number,
-                                           parent->devfn);
-               if (!ret)
-                       return ret;
-               parent = parent->bus->self;
-       }
-       if (pci_is_pcie(tmp))
-               return device_context_mapped(iommu, tmp->subordinate->number,
-                                            0);
-       else
-               return device_context_mapped(iommu, tmp->bus->number,
-                                            tmp->devfn);
-}
-
-/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-                                           size_t size)
-{
-       host_addr &= ~PAGE_MASK;
-       return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
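-/* e.g. an MM-page offset of 0x800 with size 0x1000 gives PAGE_ALIGN(0x1800)
-   >> VTD_PAGE_SHIFT == 2 VT-d pages (assuming 4KiB pages for both). */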
-
-/* Return largest possible superpage level for a given mapping */
-static inline int hardware_largepage_caps(struct dmar_domain *domain,
-                                         unsigned long iov_pfn,
-                                         unsigned long phy_pfn,
-                                         unsigned long pages)
-{
-       int support, level = 1;
-       unsigned long pfnmerge;
-
-       support = domain->iommu_superpage;
-
-       /* To use a large page, the virtual *and* physical addresses
-          must be aligned to 2MiB/1GiB/etc. Lower bits set in either
-          of them will mean we have to use smaller pages. So just
-          merge them and check both at once. */
-       pfnmerge = iov_pfn | phy_pfn;
-
-       while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
-               pages >>= VTD_STRIDE_SHIFT;
-               if (!pages)
-                       break;
-               pfnmerge >>= VTD_STRIDE_SHIFT;
-               level++;
-               support--;
-       }
-       return level;
-}
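-/* Example for the helper above, with the 9-bit VT-d stride: if iov_pfn and
-   phy_pfn are both 512-page (2MiB) aligned, at least 512 pages remain, and
-   the hardware reports superpage level >= 1, it returns 2 and a 2MiB PTE can
-   be used. */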
-
-static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-                           struct scatterlist *sg, unsigned long phys_pfn,
-                           unsigned long nr_pages, int prot)
-{
-       struct dma_pte *first_pte = NULL, *pte = NULL;
-       phys_addr_t uninitialized_var(pteval);
-       int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-       unsigned long sg_res;
-       unsigned int largepage_lvl = 0;
-       unsigned long lvl_pages = 0;
-
-       BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
-
-       if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
-               return -EINVAL;
-
-       prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
-
-       if (sg)
-               sg_res = 0;
-       else {
-               sg_res = nr_pages + 1;
-               pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
-       }
-
-       while (nr_pages > 0) {
-               uint64_t tmp;
-
-               if (!sg_res) {
-                       sg_res = aligned_nrpages(sg->offset, sg->length);
-                       sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
-                       sg->dma_length = sg->length;
-                       pteval = page_to_phys(sg_page(sg)) | prot;
-                       phys_pfn = pteval >> VTD_PAGE_SHIFT;
-               }
-
-               if (!pte) {
-                       largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
-
-                       first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
-                       if (!pte)
-                               return -ENOMEM;
-                       /* It is a large page */
-                       if (largepage_lvl > 1)
-                               pteval |= DMA_PTE_LARGE_PAGE;
-                       else
-                               pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
-
-               }
-               /* We don't need lock here, nobody else
-                * touches the iova range
-                */
-               tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
-               if (tmp) {
-                       static int dumps = 5;
-                       printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
-                              iov_pfn, tmp, (unsigned long long)pteval);
-                       if (dumps) {
-                               dumps--;
-                               debug_dma_dump_mappings(NULL);
-                       }
-                       WARN_ON(1);
-               }
-
-               lvl_pages = lvl_to_nr_pages(largepage_lvl);
-
-               BUG_ON(nr_pages < lvl_pages);
-               BUG_ON(sg_res < lvl_pages);
-
-               nr_pages -= lvl_pages;
-               iov_pfn += lvl_pages;
-               phys_pfn += lvl_pages;
-               pteval += lvl_pages * VTD_PAGE_SIZE;
-               sg_res -= lvl_pages;
-
-               /* If the next PTE would be the first in a new page, then we
-                  need to flush the cache on the entries we've just written.
-                  And then we'll need to recalculate 'pte', so clear it and
-                  let it get set again in the if (!pte) block above.
-
-                  If we're done (!nr_pages) we need to flush the cache too.
-
-                  Also if we've been setting superpages, we may need to
-                  recalculate 'pte' and switch back to smaller pages for the
-                  end of the mapping, if the trailing size is not enough to
-                  use another superpage (i.e. sg_res < lvl_pages). */
-               pte++;
-               if (!nr_pages || first_pte_in_page(pte) ||
-                   (largepage_lvl > 1 && sg_res < lvl_pages)) {
-                       domain_flush_cache(domain, first_pte,
-                                          (void *)pte - (void *)first_pte);
-                       pte = NULL;
-               }
-
-               if (!sg_res && nr_pages)
-                       sg = sg_next(sg);
-       }
-       return 0;
-}
-
-static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-                                   struct scatterlist *sg, unsigned long nr_pages,
-                                   int prot)
-{
-       return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
-}
-
-static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-                                    unsigned long phys_pfn, unsigned long nr_pages,
-                                    int prot)
-{
-       return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
-}
-
-static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
-       if (!iommu)
-               return;
-
-       clear_context_table(iommu, bus, devfn);
-       iommu->flush.flush_context(iommu, 0, 0, 0,
-                                          DMA_CCMD_GLOBAL_INVL);
-       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-}
-
-static void domain_remove_dev_info(struct dmar_domain *domain)
-{
-       struct device_domain_info *info;
-       unsigned long flags;
-       struct intel_iommu *iommu;
-
-       spin_lock_irqsave(&device_domain_lock, flags);
-       while (!list_empty(&domain->devices)) {
-               info = list_entry(domain->devices.next,
-                       struct device_domain_info, link);
-               list_del(&info->link);
-               list_del(&info->global);
-               if (info->dev)
-                       info->dev->dev.archdata.iommu = NULL;
-               spin_unlock_irqrestore(&device_domain_lock, flags);
-
-               iommu_disable_dev_iotlb(info);
-               iommu = device_to_iommu(info->segment, info->bus, info->devfn);
-               iommu_detach_dev(iommu, info->bus, info->devfn);
-               free_devinfo_mem(info);
-
-               spin_lock_irqsave(&device_domain_lock, flags);
-       }
-       spin_unlock_irqrestore(&device_domain_lock, flags);
-}
-
-/*
- * find_domain
 - * Note: the domain info is stored in struct pci_dev->dev.archdata.iommu
- */
-static struct dmar_domain *
-find_domain(struct pci_dev *pdev)
-{
-       struct device_domain_info *info;
-
-       /* No lock here, assumes no domain exit in normal case */
-       info = pdev->dev.archdata.iommu;
-       if (info)
-               return info->domain;
-       return NULL;
-}
-
-/* domain is initialized */
-static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
-{
-       struct dmar_domain *domain, *found = NULL;
-       struct intel_iommu *iommu;
-       struct dmar_drhd_unit *drhd;
-       struct device_domain_info *info, *tmp;
-       struct pci_dev *dev_tmp;
-       unsigned long flags;
-       int bus = 0, devfn = 0;
-       int segment;
-       int ret;
-
-       domain = find_domain(pdev);
-       if (domain)
-               return domain;
-
-       segment = pci_domain_nr(pdev->bus);
-
-       dev_tmp = pci_find_upstream_pcie_bridge(pdev);
-       if (dev_tmp) {
-               if (pci_is_pcie(dev_tmp)) {
-                       bus = dev_tmp->subordinate->number;
-                       devfn = 0;
-               } else {
-                       bus = dev_tmp->bus->number;
-                       devfn = dev_tmp->devfn;
-               }
-               spin_lock_irqsave(&device_domain_lock, flags);
-               list_for_each_entry(info, &device_domain_list, global) {
-                       if (info->segment == segment &&
-                           info->bus == bus && info->devfn == devfn) {
-                               found = info->domain;
-                               break;
-                       }
-               }
-               spin_unlock_irqrestore(&device_domain_lock, flags);
-               /* pcie-pci bridge already has a domain, use it */
-               if (found) {
-                       domain = found;
-                       goto found_domain;
-               }
-       }
-
-       domain = alloc_domain();
-       if (!domain)
-               goto error;
-
-       /* Allocate new domain for the device */
-       drhd = dmar_find_matched_drhd_unit(pdev);
-       if (!drhd) {
-               printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
-                       pci_name(pdev));
-               free_domain_mem(domain);
-               return NULL;
-       }
-       iommu = drhd->iommu;
-
-       ret = iommu_attach_domain(domain, iommu);
-       if (ret) {
-               free_domain_mem(domain);
-               goto error;
-       }
-
-       if (domain_init(domain, gaw)) {
-               domain_exit(domain);
-               goto error;
-       }
-
-       /* register pcie-to-pci device */
-       if (dev_tmp) {
-               info = alloc_devinfo_mem();
-               if (!info) {
-                       domain_exit(domain);
-                       goto error;
-               }
-               info->segment = segment;
-               info->bus = bus;
-               info->devfn = devfn;
-               info->dev = NULL;
-               info->domain = domain;
-               /* This domain is shared by devices under p2p bridge */
-               domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
-
-               /* pcie-to-pci bridge already has a domain, use it */
-               found = NULL;
-               spin_lock_irqsave(&device_domain_lock, flags);
-               list_for_each_entry(tmp, &device_domain_list, global) {
-                       if (tmp->segment == segment &&
-                           tmp->bus == bus && tmp->devfn == devfn) {
-                               found = tmp->domain;
-                               break;
-                       }
-               }
-               if (found) {
-                       spin_unlock_irqrestore(&device_domain_lock, flags);
-                       free_devinfo_mem(info);
-                       domain_exit(domain);
-                       domain = found;
-               } else {
-                       list_add(&info->link, &domain->devices);
-                       list_add(&info->global, &device_domain_list);
-                       spin_unlock_irqrestore(&device_domain_lock, flags);
-               }
-       }
-
-found_domain:
-       info = alloc_devinfo_mem();
-       if (!info)
-               goto error;
-       info->segment = segment;
-       info->bus = pdev->bus->number;
-       info->devfn = pdev->devfn;
-       info->dev = pdev;
-       info->domain = domain;
-       spin_lock_irqsave(&device_domain_lock, flags);
-       /* somebody else set it up first */
-       found = find_domain(pdev);
-       if (found != NULL) {
-               spin_unlock_irqrestore(&device_domain_lock, flags);
-               if (found != domain) {
-                       domain_exit(domain);
-                       domain = found;
-               }
-               free_devinfo_mem(info);
-               return domain;
-       }
-       list_add(&info->link, &domain->devices);
-       list_add(&info->global, &device_domain_list);
-       pdev->dev.archdata.iommu = info;
-       spin_unlock_irqrestore(&device_domain_lock, flags);
-       return domain;
-error:
-       /* recheck it here, in case somebody else set it meanwhile */
-       return find_domain(pdev);
-}
-
-static int iommu_identity_mapping;
-#define IDENTMAP_ALL           1
-#define IDENTMAP_GFX           2
-#define IDENTMAP_AZALIA                4
-
-static int iommu_domain_identity_map(struct dmar_domain *domain,
-                                    unsigned long long start,
-                                    unsigned long long end)
-{
-       unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
-       unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
-
-       if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
-                         dma_to_mm_pfn(last_vpfn))) {
-               printk(KERN_ERR "IOMMU: reserve iova failed\n");
-               return -ENOMEM;
-       }
-
-       pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
-                start, end, domain->id);
-       /*
-        * RMRR range might have overlap with physical memory range,
-        * clear it first
-        */
-       dma_pte_clear_range(domain, first_vpfn, last_vpfn);
-
-       return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
-                                 last_vpfn - first_vpfn + 1,
-                                 DMA_PTE_READ|DMA_PTE_WRITE);
-}
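
   A minimal sketch (not part of the patch) of the pfn arithmetic performed
   above, assuming VTD_PAGE_SHIFT is 12 as used elsewhere in this file; the
   helper name is hypothetical:

	/* Sketch only: how a byte range collapses to VT-d page frames.  An RMRR
	 * spanning 0xd0000..0xd3fff yields first_vpfn 0xd0, last_vpfn 0xd3,
	 * i.e. 4 pages handed to domain_pfn_mapping(). */
	static unsigned long example_identity_map_pages(unsigned long long start,
							unsigned long long end)
	{
		unsigned long first_vpfn = start >> 12;	/* VTD_PAGE_SHIFT */
		unsigned long last_vpfn = end >> 12;

		return last_vpfn - first_vpfn + 1;
	}
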
-
-static int iommu_prepare_identity_map(struct pci_dev *pdev,
-                                     unsigned long long start,
-                                     unsigned long long end)
-{
-       struct dmar_domain *domain;
-       int ret;
-
-       domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-       if (!domain)
-               return -ENOMEM;
-
-       /* For _hardware_ passthrough, don't bother. But for software
-          passthrough, we do it anyway -- it may indicate a memory
-          range which is reserved in E820 and so didn't get set
-          up in si_domain to start with */
-       if (domain == si_domain && hw_pass_through) {
-               printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
-                      pci_name(pdev), start, end);
-               return 0;
-       }
-
-       printk(KERN_INFO
-              "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-              pci_name(pdev), start, end);
-
-       if (end < start) {
-               WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
-                       "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-                       dmi_get_system_info(DMI_BIOS_VENDOR),
-                       dmi_get_system_info(DMI_BIOS_VERSION),
-                       dmi_get_system_info(DMI_PRODUCT_VERSION));
-               ret = -EIO;
-               goto error;
-       }
-
-       if (end >> agaw_to_width(domain->agaw)) {
-               WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
-                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-                    agaw_to_width(domain->agaw),
-                    dmi_get_system_info(DMI_BIOS_VENDOR),
-                    dmi_get_system_info(DMI_BIOS_VERSION),
-                    dmi_get_system_info(DMI_PRODUCT_VERSION));
-               ret = -EIO;
-               goto error;
-       }
-
-       ret = iommu_domain_identity_map(domain, start, end);
-       if (ret)
-               goto error;
-
-       /* context entry init */
-       ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
-       if (ret)
-               goto error;
-
-       return 0;
-
- error:
-       domain_exit(domain);
-       return ret;
-}
-
-static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
-       struct pci_dev *pdev)
-{
-       if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
-               return 0;
-       return iommu_prepare_identity_map(pdev, rmrr->base_address,
-               rmrr->end_address);
-}
-
-#ifdef CONFIG_DMAR_FLOPPY_WA
-static inline void iommu_prepare_isa(void)
-{
-       struct pci_dev *pdev;
-       int ret;
-
-       pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
-       if (!pdev)
-               return;
-
-       printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
-       ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
-
-       if (ret)
-               printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
-                      "floppy might not work\n");
-
-}
-#else
-static inline void iommu_prepare_isa(void)
-{
-       return;
-}
-#endif /* CONFIG_DMAR_FLOPPY_WA */
-
-static int md_domain_init(struct dmar_domain *domain, int guest_width);
-
-static int __init si_domain_work_fn(unsigned long start_pfn,
-                                   unsigned long end_pfn, void *datax)
-{
-       int *ret = datax;
-
-       *ret = iommu_domain_identity_map(si_domain,
-                                        (uint64_t)start_pfn << PAGE_SHIFT,
-                                        (uint64_t)end_pfn << PAGE_SHIFT);
-       return *ret;
-
-}
-
-static int __init si_domain_init(int hw)
-{
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
-       int nid, ret = 0;
-
-       si_domain = alloc_domain();
-       if (!si_domain)
-               return -EFAULT;
-
-       pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
-
-       for_each_active_iommu(iommu, drhd) {
-               ret = iommu_attach_domain(si_domain, iommu);
-               if (ret) {
-                       domain_exit(si_domain);
-                       return -EFAULT;
-               }
-       }
-
-       if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
-               domain_exit(si_domain);
-               return -EFAULT;
-       }
-
-       si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
-
-       if (hw)
-               return 0;
-
-       for_each_online_node(nid) {
-               work_with_active_regions(nid, si_domain_work_fn, &ret);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
-                                         struct pci_dev *pdev);
-static int identity_mapping(struct pci_dev *pdev)
-{
-       struct device_domain_info *info;
-
-       if (likely(!iommu_identity_mapping))
-               return 0;
-
-       info = pdev->dev.archdata.iommu;
-       if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
-               return (info->domain == si_domain);
-
-       return 0;
-}
-
-static int domain_add_dev_info(struct dmar_domain *domain,
-                              struct pci_dev *pdev,
-                              int translation)
-{
-       struct device_domain_info *info;
-       unsigned long flags;
-       int ret;
-
-       info = alloc_devinfo_mem();
-       if (!info)
-               return -ENOMEM;
-
-       ret = domain_context_mapping(domain, pdev, translation);
-       if (ret) {
-               free_devinfo_mem(info);
-               return ret;
-       }
-
-       info->segment = pci_domain_nr(pdev->bus);
-       info->bus = pdev->bus->number;
-       info->devfn = pdev->devfn;
-       info->dev = pdev;
-       info->domain = domain;
-
-       spin_lock_irqsave(&device_domain_lock, flags);
-       list_add(&info->link, &domain->devices);
-       list_add(&info->global, &device_domain_list);
-       pdev->dev.archdata.iommu = info;
-       spin_unlock_irqrestore(&device_domain_lock, flags);
-
-       return 0;
-}
-
-static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
-{
-       if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
-               return 1;
-
-       if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
-               return 1;
-
-       if (!(iommu_identity_mapping & IDENTMAP_ALL))
-               return 0;
-
-       /*
-        * We want to start off with all devices in the 1:1 domain, and
-        * take them out later if we find they can't access all of memory.
-        *
-        * However, we can't do this for PCI devices behind bridges,
-        * because all PCI devices behind the same bridge will end up
-        * with the same source-id on their transactions.
-        *
-        * Practically speaking, we can't change things around for these
-        * devices at run-time, because we can't be sure there'll be no
-        * DMA transactions in flight for any of their siblings.
-        * 
-        * So PCI devices (unless they're on the root bus) as well as
-        * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
-        * the 1:1 domain, just in _case_ one of their siblings turns out
-        * not to be able to map all of memory.
-        */
-       if (!pci_is_pcie(pdev)) {
-               if (!pci_is_root_bus(pdev->bus))
-                       return 0;
-               if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
-                       return 0;
-       } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
-               return 0;
-
-       /* 
-        * At boot time, we don't yet know if devices will be 64-bit capable.
-        * Assume that they will -- if they turn out not to be, then we can 
-        * take them out of the 1:1 domain later.
-        */
-       if (!startup) {
-               /*
-                * If the device's dma_mask is less than the system's memory
-                * size then this is not a candidate for identity mapping.
-                */
-               u64 dma_mask = pdev->dma_mask;
-
-               if (pdev->dev.coherent_dma_mask &&
-                   pdev->dev.coherent_dma_mask < dma_mask)
-                       dma_mask = pdev->dev.coherent_dma_mask;
-
-               return dma_mask >= dma_get_required_mask(&pdev->dev);
-       }
-
-       return 1;
-}
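
   The !startup branch above boils down to a mask comparison; a hedged sketch
   (the helper name is hypothetical, and dma_get_required_mask() reflects the
   amount of installed memory, so on a machine with more than 4GiB a
   32-bit-only device fails the test and is left out of si_domain):

	/* Illustration only: does the device's effective DMA mask cover all of
	 * memory, so it can stay identity-mapped? */
	static int example_stays_identity_mapped(u64 dev_mask, u64 coherent_mask,
						 u64 required_mask)
	{
		u64 dma_mask = dev_mask;

		if (coherent_mask && coherent_mask < dma_mask)
			dma_mask = coherent_mask;

		return dma_mask >= required_mask;
	}
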
-
-static int __init iommu_prepare_static_identity_mapping(int hw)
-{
-       struct pci_dev *pdev = NULL;
-       int ret;
-
-       ret = si_domain_init(hw);
-       if (ret)
-               return -EFAULT;
-
-       for_each_pci_dev(pdev) {
-               /* Skip Host/PCI Bridge devices */
-               if (IS_BRIDGE_HOST_DEVICE(pdev))
-                       continue;
-               if (iommu_should_identity_map(pdev, 1)) {
-                       printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
-                              hw ? "hardware" : "software", pci_name(pdev));
-
-                       ret = domain_add_dev_info(si_domain, pdev,
-                                                    hw ? CONTEXT_TT_PASS_THROUGH :
-                                                    CONTEXT_TT_MULTI_LEVEL);
-                       if (ret)
-                               return ret;
-               }
-       }
-
-       return 0;
-}
-
-static int __init init_dmars(void)
-{
-       struct dmar_drhd_unit *drhd;
-       struct dmar_rmrr_unit *rmrr;
-       struct pci_dev *pdev;
-       struct intel_iommu *iommu;
-       int i, ret;
-
-       /*
-        * for each drhd
-        *    allocate root
-        *    initialize and program root entry to not present
-        * endfor
-        */
-       for_each_drhd_unit(drhd) {
-               g_num_of_iommus++;
-               /*
-                * lock not needed: this is only incremented in the single
-                * threaded kernel __init code path; all other accesses are
-                * read only
-                */
-       }
-
-       g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
-                       GFP_KERNEL);
-       if (!g_iommus) {
-               printk(KERN_ERR "Allocating global iommu array failed\n");
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       deferred_flush = kzalloc(g_num_of_iommus *
-               sizeof(struct deferred_flush_tables), GFP_KERNEL);
-       if (!deferred_flush) {
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
-
-               iommu = drhd->iommu;
-               g_iommus[iommu->seq_id] = iommu;
-
-               ret = iommu_init_domains(iommu);
-               if (ret)
-                       goto error;
-
-               /*
-                * TBD:
-                * we could share the same root & context tables
-                * among all IOMMUs; need to split it later.
-                */
-               ret = iommu_alloc_root_entry(iommu);
-               if (ret) {
-                       printk(KERN_ERR "IOMMU: allocate root entry failed\n");
-                       goto error;
-               }
-               if (!ecap_pass_through(iommu->ecap))
-                       hw_pass_through = 0;
-       }
-
-       /*
-        * Start from a sane iommu hardware state.
-        */
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
-
-               iommu = drhd->iommu;
-
-               /*
-                * If queued invalidation was already initialized by us
-                * (for example, while enabling interrupt-remapping) then
-                * things are already rolling from a sane state.
-                */
-               if (iommu->qi)
-                       continue;
-
-               /*
-                * Clear any previous faults.
-                */
-               dmar_fault(-1, iommu);
-               /*
-                * Disable queued invalidation if supported and already enabled
-                * before OS handover.
-                */
-               dmar_disable_qi(iommu);
-       }
-
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
-
-               iommu = drhd->iommu;
-
-               if (dmar_enable_qi(iommu)) {
-                       /*
-                        * Queued Invalidate not enabled, use Register Based
-                        * Invalidate
-                        */
-                       iommu->flush.flush_context = __iommu_flush_context;
-                       iommu->flush.flush_iotlb = __iommu_flush_iotlb;
-                       printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
-                              "invalidation\n",
-                               iommu->seq_id,
-                              (unsigned long long)drhd->reg_base_addr);
-               } else {
-                       iommu->flush.flush_context = qi_flush_context;
-                       iommu->flush.flush_iotlb = qi_flush_iotlb;
-                       printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
-                              "invalidation\n",
-                               iommu->seq_id,
-                              (unsigned long long)drhd->reg_base_addr);
-               }
-       }
-
-       if (iommu_pass_through)
-               iommu_identity_mapping |= IDENTMAP_ALL;
-
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
-       iommu_identity_mapping |= IDENTMAP_GFX;
-#endif
-
-       check_tylersburg_isoch();
-
-       /*
-        * If pass-through is not set or not enabled, set up context entries
-        * for identity mappings for rmrr, gfx and isa, possibly falling back
-        * to static identity mapping if iommu_identity_mapping is set.
-        */
-       if (iommu_identity_mapping) {
-               ret = iommu_prepare_static_identity_mapping(hw_pass_through);
-               if (ret) {
-                       printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
-                       goto error;
-               }
-       }
-       /*
-        * For each rmrr
-        *   for each dev attached to rmrr
-        *   do
-        *     locate drhd for dev, alloc domain for dev
-        *     allocate free domain
-        *     allocate page table entries for rmrr
-        *     if context not allocated for bus
-        *           allocate and init context
-        *           set present in root table for this bus
-        *     init context with domain, translation etc
-        *    endfor
-        * endfor
-        */
-       printk(KERN_INFO "IOMMU: Setting RMRR:\n");
-       for_each_rmrr_units(rmrr) {
-               for (i = 0; i < rmrr->devices_cnt; i++) {
-                       pdev = rmrr->devices[i];
-                       /*
-                        * some BIOSes list non-existent devices in the
-                        * DMAR table.
-                        */
-                       if (!pdev)
-                               continue;
-                       ret = iommu_prepare_rmrr_dev(rmrr, pdev);
-                       if (ret)
-                               printk(KERN_ERR
-                                      "IOMMU: mapping reserved region failed\n");
-               }
-       }
-
-       iommu_prepare_isa();
-
-       /*
-        * for each drhd
-        *   enable fault log
-        *   global invalidate context cache
-        *   global invalidate iotlb
-        *   enable translation
-        */
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored) {
-                       /*
-                        * we always have to disable PMRs or DMA may fail on
-                        * this device
-                        */
-                       if (force_on)
-                               iommu_disable_protect_mem_regions(drhd->iommu);
-                       continue;
-               }
-               iommu = drhd->iommu;
-
-               iommu_flush_write_buffer(iommu);
-
-               ret = dmar_set_interrupt(iommu);
-               if (ret)
-                       goto error;
-
-               iommu_set_root_entry(iommu);
-
-               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
-               ret = iommu_enable_translation(iommu);
-               if (ret)
-                       goto error;
-
-               iommu_disable_protect_mem_regions(iommu);
-       }
-
-       return 0;
-error:
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
-               iommu = drhd->iommu;
-               free_iommu(iommu);
-       }
-       kfree(g_iommus);
-       return ret;
-}
-
-/* This takes a number of _MM_ pages, not VTD pages */
-static struct iova *intel_alloc_iova(struct device *dev,
-                                    struct dmar_domain *domain,
-                                    unsigned long nrpages, uint64_t dma_mask)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct iova *iova = NULL;
-
-       /* Restrict dma_mask to the width that the iommu can handle */
-       dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
-
-       if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
-               /*
-                * First try to allocate an io virtual address in
-                * DMA_BIT_MASK(32) and if that fails then try allocating
-                * from higher range
-                */
-               iova = alloc_iova(&domain->iovad, nrpages,
-                                 IOVA_PFN(DMA_BIT_MASK(32)), 1);
-               if (iova)
-                       return iova;
-       }
-       iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
-       if (unlikely(!iova)) {
-               printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
-                      nrpages, pci_name(pdev));
-               return NULL;
-       }
-
-       return iova;
-}
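
   The mm/dma pfn helpers used above are defined earlier in this file; as a
   hedged sketch of the assumed conversion (one kernel MM page is
   2^(PAGE_SHIFT - VTD_PAGE_SHIFT) 4KiB VT-d pages, so e.g. ia64 with 16KiB
   pages gets four VT-d pages per MM page; the names here are illustrative):

	#define EXAMPLE_VTD_PAGE_SHIFT	12
	static inline unsigned long example_mm_to_dma_pfn(unsigned long mm_pfn)
	{
		return mm_pfn << (PAGE_SHIFT - EXAMPLE_VTD_PAGE_SHIFT);
	}
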
-
-static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
-{
-       struct dmar_domain *domain;
-       int ret;
-
-       domain = get_domain_for_dev(pdev,
-                       DEFAULT_DOMAIN_ADDRESS_WIDTH);
-       if (!domain) {
-               printk(KERN_ERR
-                       "Allocating domain for %s failed\n", pci_name(pdev));
-               return NULL;
-       }
-
-       /* make sure context mapping is ok */
-       if (unlikely(!domain_context_mapped(pdev))) {
-               ret = domain_context_mapping(domain, pdev,
-                                            CONTEXT_TT_MULTI_LEVEL);
-               if (ret) {
-                       printk(KERN_ERR
-                               "Domain context map for %s failed\n",
-                               pci_name(pdev));
-                       return NULL;
-               }
-       }
-
-       return domain;
-}
-
-static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
-{
-       struct device_domain_info *info;
-
-       /* No lock here, assumes no domain exit in normal case */
-       info = dev->dev.archdata.iommu;
-       if (likely(info))
-               return info->domain;
-
-       return __get_valid_domain_for_dev(dev);
-}
-
-static int iommu_dummy(struct pci_dev *pdev)
-{
-       return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
-/* Check if the pdev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct device *dev)
-{
-       struct pci_dev *pdev;
-       int found;
-
-       if (unlikely(dev->bus != &pci_bus_type))
-               return 1;
-
-       pdev = to_pci_dev(dev);
-       if (iommu_dummy(pdev))
-               return 1;
-
-       if (!iommu_identity_mapping)
-               return 0;
-
-       found = identity_mapping(pdev);
-       if (found) {
-               if (iommu_should_identity_map(pdev, 0))
-                       return 1;
-               else {
-                       /*
-                        * 32 bit DMA is removed from si_domain and falls back
-                        * to non-identity mapping.
-                        */
-                       domain_remove_one_dev_info(si_domain, pdev);
-                       printk(KERN_INFO "32bit %s uses non-identity mapping\n",
-                              pci_name(pdev));
-                       return 0;
-               }
-       } else {
-               /*
-                        * In case a 64 bit DMA device is detached from a vm, the device
-                        * is put into si_domain for identity mapping.
-                */
-               if (iommu_should_identity_map(pdev, 0)) {
-                       int ret;
-                       ret = domain_add_dev_info(si_domain, pdev,
-                                                 hw_pass_through ?
-                                                 CONTEXT_TT_PASS_THROUGH :
-                                                 CONTEXT_TT_MULTI_LEVEL);
-                       if (!ret) {
-                               printk(KERN_INFO "64bit %s uses identity mapping\n",
-                                      pci_name(pdev));
-                               return 1;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
-                                    size_t size, int dir, u64 dma_mask)
-{
-       struct pci_dev *pdev = to_pci_dev(hwdev);
-       struct dmar_domain *domain;
-       phys_addr_t start_paddr;
-       struct iova *iova;
-       int prot = 0;
-       int ret;
-       struct intel_iommu *iommu;
-       unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
-
-       BUG_ON(dir == DMA_NONE);
-
-       if (iommu_no_mapping(hwdev))
-               return paddr;
-
-       domain = get_valid_domain_for_dev(pdev);
-       if (!domain)
-               return 0;
-
-       iommu = domain_get_iommu(domain);
-       size = aligned_nrpages(paddr, size);
-
-       iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
-       if (!iova)
-               goto error;
-
-       /*
-        * Check if DMAR supports zero-length reads on write-only
-        * mappings.
-        */
-       if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-                       !cap_zlr(iommu->cap))
-               prot |= DMA_PTE_READ;
-       if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
-               prot |= DMA_PTE_WRITE;
-       /*
-        * paddr - (paddr + size) might span a partial page; we should map the
-        * whole page.  Note: if two parts of one page are mapped separately,
-        * we might have two guest_addrs mapping to the same host paddr, but
-        * this is not a big problem
-        */
-       ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
-                                mm_to_dma_pfn(paddr_pfn), size, prot);
-       if (ret)
-               goto error;
-
-       /* it's a non-present to present mapping. Only flush if caching mode */
-       if (cap_caching_mode(iommu->cap))
-               iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
-       else
-               iommu_flush_write_buffer(iommu);
-
-       start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
-       start_paddr += paddr & ~PAGE_MASK;
-       return start_paddr;
-
-error:
-       if (iova)
-               __free_iova(&domain->iovad, iova);
-       printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
-               pci_name(pdev), size, (unsigned long long)paddr, dir);
-       return 0;
-}
-
-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
-                                unsigned long offset, size_t size,
-                                enum dma_data_direction dir,
-                                struct dma_attrs *attrs)
-{
-       return __intel_map_single(dev, page_to_phys(page) + offset, size,
-                                 dir, to_pci_dev(dev)->dma_mask);
-}
-
-static void flush_unmaps(void)
-{
-       int i, j;
-
-       timer_on = 0;
-
-       /* just flush them all */
-       for (i = 0; i < g_num_of_iommus; i++) {
-               struct intel_iommu *iommu = g_iommus[i];
-               if (!iommu)
-                       continue;
-
-               if (!deferred_flush[i].next)
-                       continue;
-
-               /* In caching mode, global flushes make emulation expensive */
-               if (!cap_caching_mode(iommu->cap))
-                       iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                        DMA_TLB_GLOBAL_FLUSH);
-               for (j = 0; j < deferred_flush[i].next; j++) {
-                       unsigned long mask;
-                       struct iova *iova = deferred_flush[i].iova[j];
-                       struct dmar_domain *domain = deferred_flush[i].domain[j];
-
-                       /* On real hardware multiple invalidations are expensive */
-                       if (cap_caching_mode(iommu->cap))
-                               iommu_flush_iotlb_psi(iommu, domain->id,
-                               iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
-                       else {
-                               mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
-                               iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
-                                               (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
-                       }
-                       __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
-               }
-               deferred_flush[i].next = 0;
-       }
-
-       list_size = 0;
-}
-
-static void flush_unmaps_timeout(unsigned long data)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&async_umap_flush_lock, flags);
-       flush_unmaps();
-       spin_unlock_irqrestore(&async_umap_flush_lock, flags);
-}
-
-static void add_unmap(struct dmar_domain *dom, struct iova *iova)
-{
-       unsigned long flags;
-       int next, iommu_id;
-       struct intel_iommu *iommu;
-
-       spin_lock_irqsave(&async_umap_flush_lock, flags);
-       if (list_size == HIGH_WATER_MARK)
-               flush_unmaps();
-
-       iommu = domain_get_iommu(dom);
-       iommu_id = iommu->seq_id;
-
-       next = deferred_flush[iommu_id].next;
-       deferred_flush[iommu_id].domain[next] = dom;
-       deferred_flush[iommu_id].iova[next] = iova;
-       deferred_flush[iommu_id].next++;
-
-       if (!timer_on) {
-               mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
-               timer_on = 1;
-       }
-       list_size++;
-       spin_unlock_irqrestore(&async_umap_flush_lock, flags);
-}
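
   A condensed view (illustrative, not part of the patch) of the lazy-unmap
   policy implemented by add_unmap() and flush_unmaps() above:

	/*
	 * - freed IOVAs are queued per IOMMU instead of being flushed at once;
	 * - flush_unmaps() runs once HIGH_WATER_MARK entries are pending, or
	 *   at the latest ~10ms later when unmap_timer fires
	 *   flush_unmaps_timeout();
	 * - intel_iommu_strict bypasses the queue entirely and flushes
	 *   synchronously (see intel_unmap_page() below).
	 */
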
-
-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
-                            size_t size, enum dma_data_direction dir,
-                            struct dma_attrs *attrs)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct dmar_domain *domain;
-       unsigned long start_pfn, last_pfn;
-       struct iova *iova;
-       struct intel_iommu *iommu;
-
-       if (iommu_no_mapping(dev))
-               return;
-
-       domain = find_domain(pdev);
-       BUG_ON(!domain);
-
-       iommu = domain_get_iommu(domain);
-
-       iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
-       if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
-                     (unsigned long long)dev_addr))
-               return;
-
-       start_pfn = mm_to_dma_pfn(iova->pfn_lo);
-       last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
-
-       pr_debug("Device %s unmapping: pfn %lx-%lx\n",
-                pci_name(pdev), start_pfn, last_pfn);
-
-       /*  clear the whole page */
-       dma_pte_clear_range(domain, start_pfn, last_pfn);
-
-       /* free page tables */
-       dma_pte_free_pagetable(domain, start_pfn, last_pfn);
-
-       if (intel_iommu_strict) {
-               iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-                                     last_pfn - start_pfn + 1, 0);
-               /* free iova */
-               __free_iova(&domain->iovad, iova);
-       } else {
-               add_unmap(domain, iova);
-               /*
-                * queue up the release of the unmap to save the ~1/6th of the
-                * cpu time otherwise used up by the iotlb flush operation...
-                */
-       }
-}
-
-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
-                                 dma_addr_t *dma_handle, gfp_t flags)
-{
-       void *vaddr;
-       int order;
-
-       size = PAGE_ALIGN(size);
-       order = get_order(size);
-
-       if (!iommu_no_mapping(hwdev))
-               flags &= ~(GFP_DMA | GFP_DMA32);
-       else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
-               if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
-                       flags |= GFP_DMA;
-               else
-                       flags |= GFP_DMA32;
-       }
-
-       vaddr = (void *)__get_free_pages(flags, order);
-       if (!vaddr)
-               return NULL;
-       memset(vaddr, 0, size);
-
-       *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
-                                        DMA_BIDIRECTIONAL,
-                                        hwdev->coherent_dma_mask);
-       if (*dma_handle)
-               return vaddr;
-       free_pages((unsigned long)vaddr, order);
-       return NULL;
-}
-
-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-                               dma_addr_t dma_handle)
-{
-       int order;
-
-       size = PAGE_ALIGN(size);
-       order = get_order(size);
-
-       intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
-       free_pages((unsigned long)vaddr, order);
-}
-
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
-                          int nelems, enum dma_data_direction dir,
-                          struct dma_attrs *attrs)
-{
-       struct pci_dev *pdev = to_pci_dev(hwdev);
-       struct dmar_domain *domain;
-       unsigned long start_pfn, last_pfn;
-       struct iova *iova;
-       struct intel_iommu *iommu;
-
-       if (iommu_no_mapping(hwdev))
-               return;
-
-       domain = find_domain(pdev);
-       BUG_ON(!domain);
-
-       iommu = domain_get_iommu(domain);
-
-       iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
-       if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
-                     (unsigned long long)sglist[0].dma_address))
-               return;
-
-       start_pfn = mm_to_dma_pfn(iova->pfn_lo);
-       last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
-
-       /*  clear the whole page */
-       dma_pte_clear_range(domain, start_pfn, last_pfn);
-
-       /* free page tables */
-       dma_pte_free_pagetable(domain, start_pfn, last_pfn);
-
-       if (intel_iommu_strict) {
-               iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-                                     last_pfn - start_pfn + 1, 0);
-               /* free iova */
-               __free_iova(&domain->iovad, iova);
-       } else {
-               add_unmap(domain, iova);
-               /*
-                * queue up the release of the unmap to save the ~1/6th of the
-                * cpu time otherwise used up by the iotlb flush operation...
-                */
-       }
-}
-
-static int intel_nontranslate_map_sg(struct device *hddev,
-       struct scatterlist *sglist, int nelems, int dir)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nelems, i) {
-               BUG_ON(!sg_page(sg));
-               sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
-               sg->dma_length = sg->length;
-       }
-       return nelems;
-}
-
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
-                       enum dma_data_direction dir, struct dma_attrs *attrs)
-{
-       int i;
-       struct pci_dev *pdev = to_pci_dev(hwdev);
-       struct dmar_domain *domain;
-       size_t size = 0;
-       int prot = 0;
-       struct iova *iova = NULL;
-       int ret;
-       struct scatterlist *sg;
-       unsigned long start_vpfn;
-       struct intel_iommu *iommu;
-
-       BUG_ON(dir == DMA_NONE);
-       if (iommu_no_mapping(hwdev))
-               return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
-
-       domain = get_valid_domain_for_dev(pdev);
-       if (!domain)
-               return 0;
-
-       iommu = domain_get_iommu(domain);
-
-       for_each_sg(sglist, sg, nelems, i)
-               size += aligned_nrpages(sg->offset, sg->length);
-
-       iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
-                               pdev->dma_mask);
-       if (!iova) {
-               sglist->dma_length = 0;
-               return 0;
-       }
-
-       /*
-        * Check if DMAR supports zero-length reads on write-only
-        * mappings.
-        */
-       if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-                       !cap_zlr(iommu->cap))
-               prot |= DMA_PTE_READ;
-       if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
-               prot |= DMA_PTE_WRITE;
-
-       start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
-
-       ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
-       if (unlikely(ret)) {
-               /*  clear the page */
-               dma_pte_clear_range(domain, start_vpfn,
-                                   start_vpfn + size - 1);
-               /* free page tables */
-               dma_pte_free_pagetable(domain, start_vpfn,
-                                      start_vpfn + size - 1);
-               /* free iova */
-               __free_iova(&domain->iovad, iova);
-               return 0;
-       }
-
-       /* it's a non-present to present mapping. Only flush if caching mode */
-       if (cap_caching_mode(iommu->cap))
-               iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
-       else
-               iommu_flush_write_buffer(iommu);
-
-       return nelems;
-}
-
-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       return !dma_addr;
-}
-
-struct dma_map_ops intel_dma_ops = {
-       .alloc_coherent = intel_alloc_coherent,
-       .free_coherent = intel_free_coherent,
-       .map_sg = intel_map_sg,
-       .unmap_sg = intel_unmap_sg,
-       .map_page = intel_map_page,
-       .unmap_page = intel_unmap_page,
-       .mapping_error = intel_mapping_error,
-};
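
   These ops are not called directly by drivers; they are reached through the
   generic DMA API once dma_ops points at intel_dma_ops (see
   intel_iommu_init() below).  A hedged usage sketch -- 'dev' and 'page' are
   hypothetical:

	/* Illustration only: mapping one page so the device can read it ends up
	 * in intel_map_page(), and the unmap in intel_unmap_page(). */
	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;
	/* ... kick off the DMA and wait for completion ... */
	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
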
-
-static inline int iommu_domain_cache_init(void)
-{
-       int ret = 0;
-
-       iommu_domain_cache = kmem_cache_create("iommu_domain",
-                                        sizeof(struct dmar_domain),
-                                        0,
-                                        SLAB_HWCACHE_ALIGN,
-                                        NULL);
-       if (!iommu_domain_cache) {
-               printk(KERN_ERR "Couldn't create iommu_domain cache\n");
-               ret = -ENOMEM;
-       }
-
-       return ret;
-}
-
-static inline int iommu_devinfo_cache_init(void)
-{
-       int ret = 0;
-
-       iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
-                                        sizeof(struct device_domain_info),
-                                        0,
-                                        SLAB_HWCACHE_ALIGN,
-                                        NULL);
-       if (!iommu_devinfo_cache) {
-               printk(KERN_ERR "Couldn't create devinfo cache\n");
-               ret = -ENOMEM;
-       }
-
-       return ret;
-}
-
-static inline int iommu_iova_cache_init(void)
-{
-       int ret = 0;
-
-       iommu_iova_cache = kmem_cache_create("iommu_iova",
-                                        sizeof(struct iova),
-                                        0,
-                                        SLAB_HWCACHE_ALIGN,
-                                        NULL);
-       if (!iommu_iova_cache) {
-               printk(KERN_ERR "Couldn't create iova cache\n");
-               ret = -ENOMEM;
-       }
-
-       return ret;
-}
-
-static int __init iommu_init_mempool(void)
-{
-       int ret;
-       ret = iommu_iova_cache_init();
-       if (ret)
-               return ret;
-
-       ret = iommu_domain_cache_init();
-       if (ret)
-               goto domain_error;
-
-       ret = iommu_devinfo_cache_init();
-       if (!ret)
-               return ret;
-
-       kmem_cache_destroy(iommu_domain_cache);
-domain_error:
-       kmem_cache_destroy(iommu_iova_cache);
-
-       return -ENOMEM;
-}
-
-static void __init iommu_exit_mempool(void)
-{
-       kmem_cache_destroy(iommu_devinfo_cache);
-       kmem_cache_destroy(iommu_domain_cache);
-       kmem_cache_destroy(iommu_iova_cache);
-
-}
-
-static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
-{
-       struct dmar_drhd_unit *drhd;
-       u32 vtbar;
-       int rc;
-
-       /* We know that this device on this chipset has its own IOMMU.
-        * If we find it under a different IOMMU, then the BIOS is lying
-        * to us. Hope that the IOMMU for this device is actually
-        * disabled, and it needs no translation...
-        */
-       rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
-       if (rc) {
-               /* "can't" happen */
-               dev_info(&pdev->dev, "failed to run vt-d quirk\n");
-               return;
-       }
-       vtbar &= 0xffff0000;
-
-       /* we know that this iommu should be at offset 0xa000 from vtbar */
-       drhd = dmar_find_matched_drhd_unit(pdev);
-       if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
-                           TAINT_FIRMWARE_WORKAROUND,
-                           "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
-               pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
-}
-DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
-
-static void __init init_no_remapping_devices(void)
-{
-       struct dmar_drhd_unit *drhd;
-
-       for_each_drhd_unit(drhd) {
-               if (!drhd->include_all) {
-                       int i;
-                       for (i = 0; i < drhd->devices_cnt; i++)
-                               if (drhd->devices[i] != NULL)
-                                       break;
-                       /* ignore DMAR unit if no pci devices exist */
-                       if (i == drhd->devices_cnt)
-                               drhd->ignored = 1;
-               }
-       }
-
-       if (dmar_map_gfx)
-               return;
-
-       for_each_drhd_unit(drhd) {
-               int i;
-               if (drhd->ignored || drhd->include_all)
-                       continue;
-
-               for (i = 0; i < drhd->devices_cnt; i++)
-                       if (drhd->devices[i] &&
-                               !IS_GFX_DEVICE(drhd->devices[i]))
-                               break;
-
-               if (i < drhd->devices_cnt)
-                       continue;
-
-               /* bypass IOMMU if it is just for gfx devices */
-               drhd->ignored = 1;
-               for (i = 0; i < drhd->devices_cnt; i++) {
-                       if (!drhd->devices[i])
-                               continue;
-                       drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
-               }
-       }
-}
-
-#ifdef CONFIG_SUSPEND
-static int init_iommu_hw(void)
-{
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu = NULL;
-
-       for_each_active_iommu(iommu, drhd)
-               if (iommu->qi)
-                       dmar_reenable_qi(iommu);
-
-       for_each_iommu(iommu, drhd) {
-               if (drhd->ignored) {
-                       /*
-                        * we always have to disable PMRs or DMA may fail on
-                        * this device
-                        */
-                       if (force_on)
-                               iommu_disable_protect_mem_regions(iommu);
-                       continue;
-               }
-
-               iommu_flush_write_buffer(iommu);
-
-               iommu_set_root_entry(iommu);
-
-               iommu->flush.flush_context(iommu, 0, 0, 0,
-                                          DMA_CCMD_GLOBAL_INVL);
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                        DMA_TLB_GLOBAL_FLUSH);
-               if (iommu_enable_translation(iommu))
-                       return 1;
-               iommu_disable_protect_mem_regions(iommu);
-       }
-
-       return 0;
-}
-
-static void iommu_flush_all(void)
-{
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
-
-       for_each_active_iommu(iommu, drhd) {
-               iommu->flush.flush_context(iommu, 0, 0, 0,
-                                          DMA_CCMD_GLOBAL_INVL);
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                        DMA_TLB_GLOBAL_FLUSH);
-       }
-}
-
-static int iommu_suspend(void)
-{
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu = NULL;
-       unsigned long flag;
-
-       for_each_active_iommu(iommu, drhd) {
-               iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
-                                                GFP_ATOMIC);
-               if (!iommu->iommu_state)
-                       goto nomem;
-       }
-
-       iommu_flush_all();
-
-       for_each_active_iommu(iommu, drhd) {
-               iommu_disable_translation(iommu);
-
-               spin_lock_irqsave(&iommu->register_lock, flag);
-
-               iommu->iommu_state[SR_DMAR_FECTL_REG] =
-                       readl(iommu->reg + DMAR_FECTL_REG);
-               iommu->iommu_state[SR_DMAR_FEDATA_REG] =
-                       readl(iommu->reg + DMAR_FEDATA_REG);
-               iommu->iommu_state[SR_DMAR_FEADDR_REG] =
-                       readl(iommu->reg + DMAR_FEADDR_REG);
-               iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
-                       readl(iommu->reg + DMAR_FEUADDR_REG);
-
-               spin_unlock_irqrestore(&iommu->register_lock, flag);
-       }
-       return 0;
-
-nomem:
-       for_each_active_iommu(iommu, drhd)
-               kfree(iommu->iommu_state);
-
-       return -ENOMEM;
-}
-
-static void iommu_resume(void)
-{
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu = NULL;
-       unsigned long flag;
-
-       if (init_iommu_hw()) {
-               if (force_on)
-                       panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
-               else
-                       WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
-               return;
-       }
-
-       for_each_active_iommu(iommu, drhd) {
-
-               spin_lock_irqsave(&iommu->register_lock, flag);
-
-               writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
-                       iommu->reg + DMAR_FECTL_REG);
-               writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
-                       iommu->reg + DMAR_FEDATA_REG);
-               writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
-                       iommu->reg + DMAR_FEADDR_REG);
-               writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
-                       iommu->reg + DMAR_FEUADDR_REG);
-
-               spin_unlock_irqrestore(&iommu->register_lock, flag);
-       }
-
-       for_each_active_iommu(iommu, drhd)
-               kfree(iommu->iommu_state);
-}
-
-static struct syscore_ops iommu_syscore_ops = {
-       .resume         = iommu_resume,
-       .suspend        = iommu_suspend,
-};
-
-static void __init init_iommu_pm_ops(void)
-{
-       register_syscore_ops(&iommu_syscore_ops);
-}
-
-#else
-static inline void init_iommu_pm_ops(void) {}
-#endif /* CONFIG_SUSPEND */
-
-/*
- * Here we only respond to the driver-unbind action for a device.
- *
- * A newly added device is not attached to its DMAR domain here yet. That will
- * happen when the device is first mapped to an iova.
- */
-static int device_notifier(struct notifier_block *nb,
-                                 unsigned long action, void *data)
-{
-       struct device *dev = data;
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct dmar_domain *domain;
-
-       if (iommu_no_mapping(dev))
-               return 0;
-
-       domain = find_domain(pdev);
-       if (!domain)
-               return 0;
-
-       if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
-               domain_remove_one_dev_info(domain, pdev);
-
-               if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
-                   !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
-                   list_empty(&domain->devices))
-                       domain_exit(domain);
-       }
-
-       return 0;
-}
-
-static struct notifier_block device_nb = {
-       .notifier_call = device_notifier,
-};
-
-int __init intel_iommu_init(void)
-{
-       int ret = 0;
-
-       /* VT-d is required for a TXT/tboot launch, so enforce that */
-       force_on = tboot_force_iommu();
-
-       if (dmar_table_init()) {
-               if (force_on)
-                       panic("tboot: Failed to initialize DMAR table\n");
-               return  -ENODEV;
-       }
-
-       if (dmar_dev_scope_init()) {
-               if (force_on)
-                       panic("tboot: Failed to initialize DMAR device scope\n");
-               return  -ENODEV;
-       }
-
-       /*
-        * Check the need for DMA-remapping initialization now.
-        * The initialization above will also be used by interrupt remapping.
-        */
-       if (no_iommu || dmar_disabled)
-               return -ENODEV;
-
-       if (iommu_init_mempool()) {
-               if (force_on)
-                       panic("tboot: Failed to initialize iommu memory\n");
-               return  -ENODEV;
-       }
-
-       if (dmar_init_reserved_ranges()) {
-               if (force_on)
-                       panic("tboot: Failed to reserve iommu ranges\n");
-               return  -ENODEV;
-       }
-
-       init_no_remapping_devices();
-
-       ret = init_dmars();
-       if (ret) {
-               if (force_on)
-                       panic("tboot: Failed to initialize DMARs\n");
-               printk(KERN_ERR "IOMMU: dmar init failed\n");
-               put_iova_domain(&reserved_iova_list);
-               iommu_exit_mempool();
-               return ret;
-       }
-       printk(KERN_INFO
-       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
-
-       init_timer(&unmap_timer);
-#ifdef CONFIG_SWIOTLB
-       swiotlb = 0;
-#endif
-       dma_ops = &intel_dma_ops;
-
-       init_iommu_pm_ops();
-
-       register_iommu(&intel_iommu_ops);
-
-       bus_register_notifier(&pci_bus_type, &device_nb);
-
-       return 0;
-}
-
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
-                                          struct pci_dev *pdev)
-{
-       struct pci_dev *tmp, *parent;
-
-       if (!iommu || !pdev)
-               return;
-
-       /* dependent device detach */
-       tmp = pci_find_upstream_pcie_bridge(pdev);
-       /* Secondary interface's bus number and devfn 0 */
-       if (tmp) {
-               parent = pdev->bus->self;
-               while (parent != tmp) {
-                       iommu_detach_dev(iommu, parent->bus->number,
-                                        parent->devfn);
-                       parent = parent->bus->self;
-               }
-               if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
-                       iommu_detach_dev(iommu,
-                               tmp->subordinate->number, 0);
-               else /* this is a legacy PCI bridge */
-                       iommu_detach_dev(iommu, tmp->bus->number,
-                                        tmp->devfn);
-       }
-}
-
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
-                                         struct pci_dev *pdev)
-{
-       struct device_domain_info *info;
-       struct intel_iommu *iommu;
-       unsigned long flags;
-       int found = 0;
-       struct list_head *entry, *tmp;
-
-       iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
-                               pdev->devfn);
-       if (!iommu)
-               return;
-
-       spin_lock_irqsave(&device_domain_lock, flags);
-       list_for_each_safe(entry, tmp, &domain->devices) {
-               info = list_entry(entry, struct device_domain_info, link);
-               if (info->segment == pci_domain_nr(pdev->bus) &&
-                   info->bus == pdev->bus->number &&
-                   info->devfn == pdev->devfn) {
-                       list_del(&info->link);
-                       list_del(&info->global);
-                       if (info->dev)
-                               info->dev->dev.archdata.iommu = NULL;
-                       spin_unlock_irqrestore(&device_domain_lock, flags);
-
-                       iommu_disable_dev_iotlb(info);
-                       iommu_detach_dev(iommu, info->bus, info->devfn);
-                       iommu_detach_dependent_devices(iommu, pdev);
-                       free_devinfo_mem(info);
-
-                       spin_lock_irqsave(&device_domain_lock, flags);
-
-                       if (found)
-                               break;
-                       else
-                               continue;
-               }
-
-               /* if there are no other devices under the same iommu
-                * owned by this domain, clear this iommu in iommu_bmp and
-                * update the iommu count and coherency
-                */
-               if (iommu == device_to_iommu(info->segment, info->bus,
-                                           info->devfn))
-                       found = 1;
-       }
-
-       if (found == 0) {
-               unsigned long tmp_flags;
-               spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
-               clear_bit(iommu->seq_id, &domain->iommu_bmp);
-               domain->iommu_count--;
-               domain_update_iommu_cap(domain);
-               spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
-
-               if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
-                   !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
-                       spin_lock_irqsave(&iommu->lock, tmp_flags);
-                       clear_bit(domain->id, iommu->domain_ids);
-                       iommu->domains[domain->id] = NULL;
-                       spin_unlock_irqrestore(&iommu->lock, tmp_flags);
-               }
-       }
-
-       spin_unlock_irqrestore(&device_domain_lock, flags);
-}
-
-static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
-{
-       struct device_domain_info *info;
-       struct intel_iommu *iommu;
-       unsigned long flags1, flags2;
-
-       spin_lock_irqsave(&device_domain_lock, flags1);
-       while (!list_empty(&domain->devices)) {
-               info = list_entry(domain->devices.next,
-                       struct device_domain_info, link);
-               list_del(&info->link);
-               list_del(&info->global);
-               if (info->dev)
-                       info->dev->dev.archdata.iommu = NULL;
-
-               spin_unlock_irqrestore(&device_domain_lock, flags1);
-
-               iommu_disable_dev_iotlb(info);
-               iommu = device_to_iommu(info->segment, info->bus, info->devfn);
-               iommu_detach_dev(iommu, info->bus, info->devfn);
-               iommu_detach_dependent_devices(iommu, info->dev);
-
-               /* clear this iommu in iommu_bmp, update iommu count
-                * and capabilities
-                */
-               spin_lock_irqsave(&domain->iommu_lock, flags2);
-               if (test_and_clear_bit(iommu->seq_id,
-                                      &domain->iommu_bmp)) {
-                       domain->iommu_count--;
-                       domain_update_iommu_cap(domain);
-               }
-               spin_unlock_irqrestore(&domain->iommu_lock, flags2);
-
-               free_devinfo_mem(info);
-               spin_lock_irqsave(&device_domain_lock, flags1);
-       }
-       spin_unlock_irqrestore(&device_domain_lock, flags1);
-}
-
-/* domain id for virtual machines; it won't be set in context entries */
-static unsigned long vm_domid;
-
-static struct dmar_domain *iommu_alloc_vm_domain(void)
-{
-       struct dmar_domain *domain;
-
-       domain = alloc_domain_mem();
-       if (!domain)
-               return NULL;
-
-       domain->id = vm_domid++;
-       domain->nid = -1;
-       memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
-       domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
-
-       return domain;
-}
-
-static int md_domain_init(struct dmar_domain *domain, int guest_width)
-{
-       int adjust_width;
-
-       init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
-       spin_lock_init(&domain->iommu_lock);
-
-       domain_reserve_special_ranges(domain);
-
-       /* calculate AGAW */
-       domain->gaw = guest_width;
-       adjust_width = guestwidth_to_adjustwidth(guest_width);
-       domain->agaw = width_to_agaw(adjust_width);
-
-       INIT_LIST_HEAD(&domain->devices);
-
-       domain->iommu_count = 0;
-       domain->iommu_coherency = 0;
-       domain->iommu_snooping = 0;
-       domain->iommu_superpage = 0;
-       domain->max_addr = 0;
-       domain->nid = -1;
-
-       /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
-       if (!domain->pgd)
-               return -ENOMEM;
-       domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
-       return 0;
-}
-
-static void iommu_free_vm_domain(struct dmar_domain *domain)
-{
-       unsigned long flags;
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
-       unsigned long i;
-       unsigned long ndomains;
-
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
-               iommu = drhd->iommu;
-
-               ndomains = cap_ndoms(iommu->cap);
-               for_each_set_bit(i, iommu->domain_ids, ndomains) {
-                       if (iommu->domains[i] == domain) {
-                               spin_lock_irqsave(&iommu->lock, flags);
-                               clear_bit(i, iommu->domain_ids);
-                               iommu->domains[i] = NULL;
-                               spin_unlock_irqrestore(&iommu->lock, flags);
-                               break;
-                       }
-               }
-       }
-}
-
-static void vm_domain_exit(struct dmar_domain *domain)
-{
-       /* Domain 0 is reserved, so don't process it */
-       if (!domain)
-               return;
-
-       vm_domain_remove_all_dev_info(domain);
-       /* destroy iovas */
-       put_iova_domain(&domain->iovad);
-
-       /* clear ptes */
-       dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
-       /* free page tables */
-       dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
-       iommu_free_vm_domain(domain);
-       free_domain_mem(domain);
-}
-
-static int intel_iommu_domain_init(struct iommu_domain *domain)
-{
-       struct dmar_domain *dmar_domain;
-
-       dmar_domain = iommu_alloc_vm_domain();
-       if (!dmar_domain) {
-               printk(KERN_ERR
-                       "intel_iommu_domain_init: dmar_domain == NULL\n");
-               return -ENOMEM;
-       }
-       if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
-               printk(KERN_ERR
-                       "intel_iommu_domain_init() failed\n");
-               vm_domain_exit(dmar_domain);
-               return -ENOMEM;
-       }
-       domain->priv = dmar_domain;
-
-       return 0;
-}
-
-static void intel_iommu_domain_destroy(struct iommu_domain *domain)
-{
-       struct dmar_domain *dmar_domain = domain->priv;
-
-       domain->priv = NULL;
-       vm_domain_exit(dmar_domain);
-}
-
-static int intel_iommu_attach_device(struct iommu_domain *domain,
-                                    struct device *dev)
-{
-       struct dmar_domain *dmar_domain = domain->priv;
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct intel_iommu *iommu;
-       int addr_width;
-
-       /* normally pdev is not mapped */
-       if (unlikely(domain_context_mapped(pdev))) {
-               struct dmar_domain *old_domain;
-
-               old_domain = find_domain(pdev);
-               if (old_domain) {
-                       if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
-                           dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
-                               domain_remove_one_dev_info(old_domain, pdev);
-                       else
-                               domain_remove_dev_info(old_domain);
-               }
-       }
-
-       iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
-                               pdev->devfn);
-       if (!iommu)
-               return -ENODEV;
-
-       /* check if this iommu agaw is sufficient for max mapped address */
-       addr_width = agaw_to_width(iommu->agaw);
-       if (addr_width > cap_mgaw(iommu->cap))
-               addr_width = cap_mgaw(iommu->cap);
-
-       if (dmar_domain->max_addr > (1LL << addr_width)) {
-               printk(KERN_ERR "%s: iommu width (%d) is not "
-                      "sufficient for the mapped address (%llx)\n",
-                      __func__, addr_width, dmar_domain->max_addr);
-               return -EFAULT;
-       }
-       dmar_domain->gaw = addr_width;
-
-       /*
-        * Knock out extra levels of page tables if necessary
-        */
-       while (iommu->agaw < dmar_domain->agaw) {
-               struct dma_pte *pte;
-
-               pte = dmar_domain->pgd;
-               if (dma_pte_present(pte)) {
-                       dmar_domain->pgd = (struct dma_pte *)
-                               phys_to_virt(dma_pte_addr(pte));
-                       free_pgtable_page(pte);
-               }
-               dmar_domain->agaw--;
-       }
-
-       return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
-}
-
-static void intel_iommu_detach_device(struct iommu_domain *domain,
-                                     struct device *dev)
-{
-       struct dmar_domain *dmar_domain = domain->priv;
-       struct pci_dev *pdev = to_pci_dev(dev);
-
-       domain_remove_one_dev_info(dmar_domain, pdev);
-}
-
-static int intel_iommu_map(struct iommu_domain *domain,
-                          unsigned long iova, phys_addr_t hpa,
-                          int gfp_order, int iommu_prot)
-{
-       struct dmar_domain *dmar_domain = domain->priv;
-       u64 max_addr;
-       int prot = 0;
-       size_t size;
-       int ret;
-
-       if (iommu_prot & IOMMU_READ)
-               prot |= DMA_PTE_READ;
-       if (iommu_prot & IOMMU_WRITE)
-               prot |= DMA_PTE_WRITE;
-       if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
-               prot |= DMA_PTE_SNP;
-
-       size     = PAGE_SIZE << gfp_order;
-       max_addr = iova + size;
-       if (dmar_domain->max_addr < max_addr) {
-               u64 end;
-
-               /* check if minimum agaw is sufficient for mapped address */
-               end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
-               if (end < max_addr) {
-                       printk(KERN_ERR "%s: iommu width (%d) is not "
-                              "sufficient for the mapped address (%llx)\n",
-                              __func__, dmar_domain->gaw, max_addr);
-                       return -EFAULT;
-               }
-               dmar_domain->max_addr = max_addr;
-       }
-       /* Round up size to next multiple of PAGE_SIZE, if it and
-          the low bits of hpa would take us onto the next page */
-       size = aligned_nrpages(hpa, size);
-       ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
-                                hpa >> VTD_PAGE_SHIFT, size, prot);
-       return ret;
-}
-
-static int intel_iommu_unmap(struct iommu_domain *domain,
-                            unsigned long iova, int gfp_order)
-{
-       struct dmar_domain *dmar_domain = domain->priv;
-       size_t size = PAGE_SIZE << gfp_order;
-
-       dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
-                           (iova + size - 1) >> VTD_PAGE_SHIFT);
-
-       if (dmar_domain->max_addr == iova + size)
-               dmar_domain->max_addr = iova;
-
-       return gfp_order;
-}
-
-static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
-                                           unsigned long iova)
-{
-       struct dmar_domain *dmar_domain = domain->priv;
-       struct dma_pte *pte;
-       u64 phys = 0;
-
-       pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
-       if (pte)
-               phys = dma_pte_addr(pte);
-
-       return phys;
-}
-
-static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
-                                     unsigned long cap)
-{
-       struct dmar_domain *dmar_domain = domain->priv;
-
-       if (cap == IOMMU_CAP_CACHE_COHERENCY)
-               return dmar_domain->iommu_snooping;
-       if (cap == IOMMU_CAP_INTR_REMAP)
-               return intr_remapping_enabled;
-
-       return 0;
-}
-
-static struct iommu_ops intel_iommu_ops = {
-       .domain_init    = intel_iommu_domain_init,
-       .domain_destroy = intel_iommu_domain_destroy,
-       .attach_dev     = intel_iommu_attach_device,
-       .detach_dev     = intel_iommu_detach_device,
-       .map            = intel_iommu_map,
-       .unmap          = intel_iommu_unmap,
-       .iova_to_phys   = intel_iommu_iova_to_phys,
-       .domain_has_cap = intel_iommu_domain_has_cap,
-};
-
-static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
-{
-       /*
-        * Mobile 4 Series Chipset neglects to set RWBF capability,
-        * but needs it:
-        */
-       printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
-       rwbf_quirk = 1;
-
-       /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
-       if (dev->revision == 0x07) {
-               printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
-               dmar_map_gfx = 0;
-       }
-}
-
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
-
-#define GGC 0x52
-#define GGC_MEMORY_SIZE_MASK   (0xf << 8)
-#define GGC_MEMORY_SIZE_NONE   (0x0 << 8)
-#define GGC_MEMORY_SIZE_1M     (0x1 << 8)
-#define GGC_MEMORY_SIZE_2M     (0x3 << 8)
-#define GGC_MEMORY_VT_ENABLED  (0x8 << 8)
-#define GGC_MEMORY_SIZE_2M_VT  (0x9 << 8)
-#define GGC_MEMORY_SIZE_3M_VT  (0xa << 8)
-#define GGC_MEMORY_SIZE_4M_VT  (0xb << 8)
-
-static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
-{
-       unsigned short ggc;
-
-       if (pci_read_config_word(dev, GGC, &ggc))
-               return;
-
-       if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
-               printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
-               dmar_map_gfx = 0;
-       }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
-
-/* On Tylersburg chipsets, some BIOSes have been known to enable the
-   ISOCH DMAR unit for the Azalia sound device, but not give it any
-   TLB entries, which causes it to deadlock. Check for that.  We do
-   this in a function called from init_dmars(), instead of in a PCI
-   quirk, because we don't want to print the obnoxious "BIOS broken"
-   message if VT-d is actually disabled.
-*/
-static void __init check_tylersburg_isoch(void)
-{
-       struct pci_dev *pdev;
-       uint32_t vtisochctrl;
-
-       /* If there's no Azalia in the system anyway, forget it. */
-       pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
-       if (!pdev)
-               return;
-       pci_dev_put(pdev);
-
-       /* System Management Registers. Might be hidden, in which case
-          we can't do the sanity check. But that's OK, because the
-          known-broken BIOSes _don't_ actually hide it, so far. */
-       pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
-       if (!pdev)
-               return;
-
-       if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
-               pci_dev_put(pdev);
-               return;
-       }
-
-       pci_dev_put(pdev);
-
-       /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
-       if (vtisochctrl & 1)
-               return;
-
-       /* Drop all bits other than the number of TLB entries */
-       vtisochctrl &= 0x1c;
-
-       /* If we have the recommended number of TLB entries (16), fine. */
-       if (vtisochctrl == 0x10)
-               return;
-
-       /* Zero TLB entries? You get to ride the short bus to school. */
-       if (!vtisochctrl) {
-               WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
-                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-                    dmi_get_system_info(DMI_BIOS_VENDOR),
-                    dmi_get_system_info(DMI_BIOS_VERSION),
-                    dmi_get_system_info(DMI_PRODUCT_VERSION));
-               iommu_identity_mapping |= IDENTMAP_AZALIA;
-               return;
-       }
-       
-       printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
-              vtisochctrl);
-}
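
For reference, a minimal sketch of how a consumer (for example a device-assignment
path) would exercise the intel_iommu_ops registered above through the generic IOMMU
API of this kernel generation (the register_iommu()/iommu_domain_alloc() interface);
the function name and error handling are illustrative assumptions, not code from the
tree:

#include <linux/iommu.h>
#include <linux/pci.h>

/* Illustrative only: map one page at @iova for DMA by @pdev. */
static int example_assign_page(struct pci_dev *pdev, unsigned long iova,
                               phys_addr_t paddr)
{
        struct iommu_domain *domain;
        int ret;

        if (!iommu_found())             /* true once register_iommu() has run */
                return -ENODEV;

        domain = iommu_domain_alloc();  /* ends up in intel_iommu_domain_init() */
        if (!domain)
                return -ENOMEM;

        ret = iommu_attach_device(domain, &pdev->dev);  /* -> attach_dev above */
        if (ret)
                goto out_free;

        /* gfp_order 0 == a single page; prot bits become DMA_PTE_READ/WRITE */
        ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                goto out_detach;

        return 0;

out_detach:
        iommu_detach_device(domain, &pdev->dev);
out_free:
        iommu_domain_free(domain);
        return ret;
}
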
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
deleted file mode 100644 (file)
index 3607faf..0000000
+++ /dev/null
@@ -1,798 +0,0 @@
-#include <linux/interrupt.h>
-#include <linux/dmar.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/hpet.h>
-#include <linux/pci.h>
-#include <linux/irq.h>
-#include <asm/io_apic.h>
-#include <asm/smp.h>
-#include <asm/cpu.h>
-#include <linux/intel-iommu.h>
-#include "intr_remapping.h"
-#include <acpi/acpi.h>
-#include <asm/pci-direct.h>
-#include "pci.h"
-
-static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
-static struct hpet_scope ir_hpet[MAX_HPET_TBS];
-static int ir_ioapic_num, ir_hpet_num;
-int intr_remapping_enabled;
-
-static int disable_intremap;
-static int disable_sourceid_checking;
-
-static __init int setup_nointremap(char *str)
-{
-       disable_intremap = 1;
-       return 0;
-}
-early_param("nointremap", setup_nointremap);
-
-static __init int setup_intremap(char *str)
-{
-       if (!str)
-               return -EINVAL;
-
-       if (!strncmp(str, "on", 2))
-               disable_intremap = 0;
-       else if (!strncmp(str, "off", 3))
-               disable_intremap = 1;
-       else if (!strncmp(str, "nosid", 5))
-               disable_sourceid_checking = 1;
-
-       return 0;
-}
-early_param("intremap", setup_intremap);
-
-static DEFINE_SPINLOCK(irq_2_ir_lock);
-
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
-{
-       struct irq_cfg *cfg = irq_get_chip_data(irq);
-       return cfg ? &cfg->irq_2_iommu : NULL;
-}
-
-int get_irte(int irq, struct irte *entry)
-{
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       unsigned long flags;
-       int index;
-
-       if (!entry || !irq_iommu)
-               return -1;
-
-       spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-       index = irq_iommu->irte_index + irq_iommu->sub_handle;
-       *entry = *(irq_iommu->iommu->ir_table->base + index);
-
-       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-       return 0;
-}
-
-int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
-{
-       struct ir_table *table = iommu->ir_table;
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       u16 index, start_index;
-       unsigned int mask = 0;
-       unsigned long flags;
-       int i;
-
-       if (!count || !irq_iommu)
-               return -1;
-
-       /*
-        * start the IRTE search from index 0.
-        */
-       index = start_index = 0;
-
-       if (count > 1) {
-               count = __roundup_pow_of_two(count);
-               mask = ilog2(count);
-       }
-
-       if (mask > ecap_max_handle_mask(iommu->ecap)) {
-               printk(KERN_ERR
-                      "Requested mask %x exceeds the max invalidation handle"
-                      " mask value %Lx\n", mask,
-                      ecap_max_handle_mask(iommu->ecap));
-               return -1;
-       }
-
-       spin_lock_irqsave(&irq_2_ir_lock, flags);
-       do {
-               for (i = index; i < index + count; i++)
-                       if  (table->base[i].present)
-                               break;
-               /* empty index found */
-               if (i == index + count)
-                       break;
-
-               index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
-               if (index == start_index) {
-                       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-                       printk(KERN_ERR "can't allocate an IRTE\n");
-                       return -1;
-               }
-       } while (1);
-
-       for (i = index; i < index + count; i++)
-               table->base[i].present = 1;
-
-       irq_iommu->iommu = iommu;
-       irq_iommu->irte_index =  index;
-       irq_iommu->sub_handle = 0;
-       irq_iommu->irte_mask = mask;
-
-       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-       return index;
-}
-
-static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
-{
-       struct qi_desc desc;
-
-       desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
-                  | QI_IEC_SELECTIVE;
-       desc.high = 0;
-
-       return qi_submit_sync(&desc, iommu);
-}
-
-int map_irq_to_irte_handle(int irq, u16 *sub_handle)
-{
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       unsigned long flags;
-       int index;
-
-       if (!irq_iommu)
-               return -1;
-
-       spin_lock_irqsave(&irq_2_ir_lock, flags);
-       *sub_handle = irq_iommu->sub_handle;
-       index = irq_iommu->irte_index;
-       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-       return index;
-}
-
-int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
-{
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       unsigned long flags;
-
-       if (!irq_iommu)
-               return -1;
-
-       spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-       irq_iommu->iommu = iommu;
-       irq_iommu->irte_index = index;
-       irq_iommu->sub_handle = subhandle;
-       irq_iommu->irte_mask = 0;
-
-       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-       return 0;
-}
-
-int modify_irte(int irq, struct irte *irte_modified)
-{
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       struct intel_iommu *iommu;
-       unsigned long flags;
-       struct irte *irte;
-       int rc, index;
-
-       if (!irq_iommu)
-               return -1;
-
-       spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-       iommu = irq_iommu->iommu;
-
-       index = irq_iommu->irte_index + irq_iommu->sub_handle;
-       irte = &iommu->ir_table->base[index];
-
-       set_64bit(&irte->low, irte_modified->low);
-       set_64bit(&irte->high, irte_modified->high);
-       __iommu_flush_cache(iommu, irte, sizeof(*irte));
-
-       rc = qi_flush_iec(iommu, index, 0);
-       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-       return rc;
-}
-
-struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
-{
-       int i;
-
-       for (i = 0; i < MAX_HPET_TBS; i++)
-               if (ir_hpet[i].id == hpet_id)
-                       return ir_hpet[i].iommu;
-       return NULL;
-}
-
-struct intel_iommu *map_ioapic_to_ir(int apic)
-{
-       int i;
-
-       for (i = 0; i < MAX_IO_APICS; i++)
-               if (ir_ioapic[i].id == apic)
-                       return ir_ioapic[i].iommu;
-       return NULL;
-}
-
-struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
-{
-       struct dmar_drhd_unit *drhd;
-
-       drhd = dmar_find_matched_drhd_unit(dev);
-       if (!drhd)
-               return NULL;
-
-       return drhd->iommu;
-}
-
-static int clear_entries(struct irq_2_iommu *irq_iommu)
-{
-       struct irte *start, *entry, *end;
-       struct intel_iommu *iommu;
-       int index;
-
-       if (irq_iommu->sub_handle)
-               return 0;
-
-       iommu = irq_iommu->iommu;
-       index = irq_iommu->irte_index + irq_iommu->sub_handle;
-
-       start = iommu->ir_table->base + index;
-       end = start + (1 << irq_iommu->irte_mask);
-
-       for (entry = start; entry < end; entry++) {
-               set_64bit(&entry->low, 0);
-               set_64bit(&entry->high, 0);
-       }
-
-       return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
-}
-
-int free_irte(int irq)
-{
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       unsigned long flags;
-       int rc;
-
-       if (!irq_iommu)
-               return -1;
-
-       spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-       rc = clear_entries(irq_iommu);
-
-       irq_iommu->iommu = NULL;
-       irq_iommu->irte_index = 0;
-       irq_iommu->sub_handle = 0;
-       irq_iommu->irte_mask = 0;
-
-       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-       return rc;
-}
-
-/*
- * source validation type
- */
-#define SVT_NO_VERIFY          0x0  /* no verification is required */
-#define SVT_VERIFY_SID_SQ      0x1  /* verify using SID and SQ fields */
-#define SVT_VERIFY_BUS         0x2  /* verify bus of request-id */
-
-/*
- * source-id qualifier
- */
-#define SQ_ALL_16      0x0  /* verify all 16 bits of request-id */
-#define SQ_13_IGNORE_1 0x1  /* verify most significant 13 bits, ignore
-                             * the third least significant bit
-                             */
-#define SQ_13_IGNORE_2 0x2  /* verify most significant 13 bits, ignore
-                             * the second and third least significant bits
-                             */
-#define SQ_13_IGNORE_3 0x3  /* verify most significant 13 bits, ignore
-                             * the least three significant bits
-                             */
-
-/*
- * set SVT, SQ and SID fields of irte to verify
- * source ids of interrupt requests
- */
-static void set_irte_sid(struct irte *irte, unsigned int svt,
-                        unsigned int sq, unsigned int sid)
-{
-       if (disable_sourceid_checking)
-               svt = SVT_NO_VERIFY;
-       irte->svt = svt;
-       irte->sq = sq;
-       irte->sid = sid;
-}
-
-int set_ioapic_sid(struct irte *irte, int apic)
-{
-       int i;
-       u16 sid = 0;
-
-       if (!irte)
-               return -1;
-
-       for (i = 0; i < MAX_IO_APICS; i++) {
-               if (ir_ioapic[i].id == apic) {
-                       sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
-                       break;
-               }
-       }
-
-       if (sid == 0) {
-               pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
-               return -1;
-       }
-
-       set_irte_sid(irte, 1, 0, sid);
-
-       return 0;
-}
-
-int set_hpet_sid(struct irte *irte, u8 id)
-{
-       int i;
-       u16 sid = 0;
-
-       if (!irte)
-               return -1;
-
-       for (i = 0; i < MAX_HPET_TBS; i++) {
-               if (ir_hpet[i].id == id) {
-                       sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
-                       break;
-               }
-       }
-
-       if (sid == 0) {
-               pr_warning("Failed to set source-id of HPET block (%d)\n", id);
-               return -1;
-       }
-
-       /*
-        * Should really use SQ_ALL_16. Some platforms are broken.
-        * While we figure out the right quirks for these broken platforms, use
-        * SQ_13_IGNORE_3 for now.
-        */
-       set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);
-
-       return 0;
-}
-
-int set_msi_sid(struct irte *irte, struct pci_dev *dev)
-{
-       struct pci_dev *bridge;
-
-       if (!irte || !dev)
-               return -1;
-
-       /* PCIe device or Root Complex integrated PCI device */
-       if (pci_is_pcie(dev) || !dev->bus->parent) {
-               set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
-                            (dev->bus->number << 8) | dev->devfn);
-               return 0;
-       }
-
-       bridge = pci_find_upstream_pcie_bridge(dev);
-       if (bridge) {
-               if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
-                       set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
-                               (bridge->bus->number << 8) | dev->bus->number);
-               else /* this is a legacy PCI bridge */
-                       set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
-                               (bridge->bus->number << 8) | bridge->devfn);
-       }
-
-       return 0;
-}
-
-static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
-{
-       u64 addr;
-       u32 sts;
-       unsigned long flags;
-
-       addr = virt_to_phys((void *)iommu->ir_table->base);
-
-       spin_lock_irqsave(&iommu->register_lock, flags);
-
-       dmar_writeq(iommu->reg + DMAR_IRTA_REG,
-                   (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
-
-       /* Set interrupt-remapping table pointer */
-       iommu->gcmd |= DMA_GCMD_SIRTP;
-       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
-       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
-                     readl, (sts & DMA_GSTS_IRTPS), sts);
-       spin_unlock_irqrestore(&iommu->register_lock, flags);
-
-       /*
-        * global invalidation of interrupt entry cache before enabling
-        * interrupt-remapping.
-        */
-       qi_global_iec(iommu);
-
-       spin_lock_irqsave(&iommu->register_lock, flags);
-
-       /* Enable interrupt-remapping */
-       iommu->gcmd |= DMA_GCMD_IRE;
-       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
-       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
-                     readl, (sts & DMA_GSTS_IRES), sts);
-
-       spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-
-
-static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
-{
-       struct ir_table *ir_table;
-       struct page *pages;
-
-       ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
-                                            GFP_ATOMIC);
-
-       if (!iommu->ir_table)
-               return -ENOMEM;
-
-       pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
-                                INTR_REMAP_PAGE_ORDER);
-
-       if (!pages) {
-               printk(KERN_ERR "failed to allocate pages of order %d\n",
-                      INTR_REMAP_PAGE_ORDER);
-               kfree(iommu->ir_table);
-               return -ENOMEM;
-       }
-
-       ir_table->base = page_address(pages);
-
-       iommu_set_intr_remapping(iommu, mode);
-       return 0;
-}
-
-/*
- * Disable Interrupt Remapping.
- */
-static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
-{
-       unsigned long flags;
-       u32 sts;
-
-       if (!ecap_ir_support(iommu->ecap))
-               return;
-
-       /*
-        * global invalidation of interrupt entry cache before disabling
-        * interrupt-remapping.
-        */
-       qi_global_iec(iommu);
-
-       spin_lock_irqsave(&iommu->register_lock, flags);
-
-       sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
-       if (!(sts & DMA_GSTS_IRES))
-               goto end;
-
-       iommu->gcmd &= ~DMA_GCMD_IRE;
-       writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
-       IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
-                     readl, !(sts & DMA_GSTS_IRES), sts);
-
-end:
-       spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-
-int __init intr_remapping_supported(void)
-{
-       struct dmar_drhd_unit *drhd;
-
-       if (disable_intremap)
-               return 0;
-
-       if (!dmar_ir_support())
-               return 0;
-
-       for_each_drhd_unit(drhd) {
-               struct intel_iommu *iommu = drhd->iommu;
-
-               if (!ecap_ir_support(iommu->ecap))
-                       return 0;
-       }
-
-       return 1;
-}
-
-int __init enable_intr_remapping(int eim)
-{
-       struct dmar_drhd_unit *drhd;
-       int setup = 0;
-
-       if (parse_ioapics_under_ir() != 1) {
-               printk(KERN_INFO "Not enabling interrupt remapping\n");
-               return -1;
-       }
-
-       for_each_drhd_unit(drhd) {
-               struct intel_iommu *iommu = drhd->iommu;
-
-               /*
-                * If the queued invalidation is already initialized,
-                * shouldn't disable it.
-                */
-               if (iommu->qi)
-                       continue;
-
-               /*
-                * Clear previous faults.
-                */
-               dmar_fault(-1, iommu);
-
-               /*
-                * Disable intr remapping and queued invalidation, if already
-                * enabled prior to OS handover.
-                */
-               iommu_disable_intr_remapping(iommu);
-
-               dmar_disable_qi(iommu);
-       }
-
-       /*
-        * check for the Interrupt-remapping support
-        */
-       for_each_drhd_unit(drhd) {
-               struct intel_iommu *iommu = drhd->iommu;
-
-               if (!ecap_ir_support(iommu->ecap))
-                       continue;
-
-               if (eim && !ecap_eim_support(iommu->ecap)) {
-                       printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
-                              " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
-                       return -1;
-               }
-       }
-
-       /*
-        * Enable queued invalidation for all the DRHD's.
-        */
-       for_each_drhd_unit(drhd) {
-               int ret;
-               struct intel_iommu *iommu = drhd->iommu;
-               ret = dmar_enable_qi(iommu);
-
-               if (ret) {
-                       printk(KERN_ERR "DRHD %Lx: failed to enable queued"
-                              " invalidation, ecap %Lx, ret %d\n",
-                              drhd->reg_base_addr, iommu->ecap, ret);
-                       return -1;
-               }
-       }
-
-       /*
-        * Setup Interrupt-remapping for all the DRHD's now.
-        */
-       for_each_drhd_unit(drhd) {
-               struct intel_iommu *iommu = drhd->iommu;
-
-               if (!ecap_ir_support(iommu->ecap))
-                       continue;
-
-               if (setup_intr_remapping(iommu, eim))
-                       goto error;
-
-               setup = 1;
-       }
-
-       if (!setup)
-               goto error;
-
-       intr_remapping_enabled = 1;
-
-       return 0;
-
-error:
-       /*
-        * handle error condition gracefully here!
-        */
-       return -1;
-}
-
-static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
-                                     struct intel_iommu *iommu)
-{
-       struct acpi_dmar_pci_path *path;
-       u8 bus;
-       int count;
-
-       bus = scope->bus;
-       path = (struct acpi_dmar_pci_path *)(scope + 1);
-       count = (scope->length - sizeof(struct acpi_dmar_device_scope))
-               / sizeof(struct acpi_dmar_pci_path);
-
-       while (--count > 0) {
-               /*
-                * Access PCI config space directly because the PCI
-                * subsystem isn't initialized yet.
-                */
-               bus = read_pci_config_byte(bus, path->dev, path->fn,
-                                          PCI_SECONDARY_BUS);
-               path++;
-       }
-       ir_hpet[ir_hpet_num].bus   = bus;
-       ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
-       ir_hpet[ir_hpet_num].iommu = iommu;
-       ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
-       ir_hpet_num++;
-}
-
-static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
-                                     struct intel_iommu *iommu)
-{
-       struct acpi_dmar_pci_path *path;
-       u8 bus;
-       int count;
-
-       bus = scope->bus;
-       path = (struct acpi_dmar_pci_path *)(scope + 1);
-       count = (scope->length - sizeof(struct acpi_dmar_device_scope))
-               / sizeof(struct acpi_dmar_pci_path);
-
-       while (--count > 0) {
-               /*
-                * Access PCI config space directly because the PCI
-                * subsystem isn't initialized yet.
-                */
-               bus = read_pci_config_byte(bus, path->dev, path->fn,
-                                          PCI_SECONDARY_BUS);
-               path++;
-       }
-
-       ir_ioapic[ir_ioapic_num].bus   = bus;
-       ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
-       ir_ioapic[ir_ioapic_num].iommu = iommu;
-       ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
-       ir_ioapic_num++;
-}
-
-static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
-                                     struct intel_iommu *iommu)
-{
-       struct acpi_dmar_hardware_unit *drhd;
-       struct acpi_dmar_device_scope *scope;
-       void *start, *end;
-
-       drhd = (struct acpi_dmar_hardware_unit *)header;
-
-       start = (void *)(drhd + 1);
-       end = ((void *)drhd) + header->length;
-
-       while (start < end) {
-               scope = start;
-               if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
-                       if (ir_ioapic_num == MAX_IO_APICS) {
-                               printk(KERN_WARNING "Exceeded Max IO APICS\n");
-                               return -1;
-                       }
-
-                       printk(KERN_INFO "IOAPIC id %d under DRHD base "
-                              " 0x%Lx IOMMU %d\n", scope->enumeration_id,
-                              drhd->address, iommu->seq_id);
-
-                       ir_parse_one_ioapic_scope(scope, iommu);
-               } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
-                       if (ir_hpet_num == MAX_HPET_TBS) {
-                               printk(KERN_WARNING "Exceeded Max HPET blocks\n");
-                               return -1;
-                       }
-
-                       printk(KERN_INFO "HPET id %d under DRHD base"
-                              " 0x%Lx\n", scope->enumeration_id,
-                              drhd->address);
-
-                       ir_parse_one_hpet_scope(scope, iommu);
-               }
-               start += scope->length;
-       }
-
-       return 0;
-}
-
-/*
- * Finds the association between IOAPICs and their Interrupt-remapping
- * hardware units.
- */
-int __init parse_ioapics_under_ir(void)
-{
-       struct dmar_drhd_unit *drhd;
-       int ir_supported = 0;
-
-       for_each_drhd_unit(drhd) {
-               struct intel_iommu *iommu = drhd->iommu;
-
-               if (ecap_ir_support(iommu->ecap)) {
-                       if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
-                               return -1;
-
-                       ir_supported = 1;
-               }
-       }
-
-       if (ir_supported && ir_ioapic_num != nr_ioapics) {
-               printk(KERN_WARNING
-                      "Not all IO-APIC's listed under remapping hardware\n");
-               return -1;
-       }
-
-       return ir_supported;
-}
-
-void disable_intr_remapping(void)
-{
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu = NULL;
-
-       /*
-        * Disable Interrupt-remapping for all the DRHD's now.
-        */
-       for_each_iommu(iommu, drhd) {
-               if (!ecap_ir_support(iommu->ecap))
-                       continue;
-
-               iommu_disable_intr_remapping(iommu);
-       }
-}
-
-int reenable_intr_remapping(int eim)
-{
-       struct dmar_drhd_unit *drhd;
-       int setup = 0;
-       struct intel_iommu *iommu = NULL;
-
-       for_each_iommu(iommu, drhd)
-               if (iommu->qi)
-                       dmar_reenable_qi(iommu);
-
-       /*
-        * Setup Interrupt-remapping for all the DRHD's now.
-        */
-       for_each_iommu(iommu, drhd) {
-               if (!ecap_ir_support(iommu->ecap))
-                       continue;
-
-               /* Set up interrupt remapping for iommu.*/
-               iommu_set_intr_remapping(iommu, eim);
-               setup = 1;
-       }
-
-       if (!setup)
-               goto error;
-
-       return 0;
-
-error:
-       /*
-        * handle error condition gracefully here!
-        */
-       return -1;
-}
-
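
As a reading aid for set_msi_sid()/set_ioapic_sid()/set_hpet_sid() above, a small
illustrative helper (hypothetical names, not from the file) showing how the 16-bit
source-id programmed into irte->sid is composed from bus:devfn, and which
requester-id bits each SQ_* qualifier tells the hardware to ignore when verifying
interrupt sources with SVT_VERIFY_SID_SQ:

#include <linux/types.h>

/* bus:devfn packed exactly like a PCI requester-id, i.e. what goes in irte->sid */
static inline u16 example_make_sid(u8 bus, u8 devfn)
{
        return (bus << 8) | devfn;
}

/* request-id bits ignored during the comparison for each source-id qualifier */
static inline u16 example_sq_ignored_bits(unsigned int sq)
{
        switch (sq) {
        case 0x0: return 0x0000;        /* SQ_ALL_16: all 16 bits must match     */
        case 0x1: return 0x0004;        /* SQ_13_IGNORE_1: ignore function bit 2 */
        case 0x2: return 0x0006;        /* SQ_13_IGNORE_2: ignore bits 2:1       */
        case 0x3: return 0x0007;        /* SQ_13_IGNORE_3: ignore bits 2:0       */
        default:  return 0x0000;
        }
}
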
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h
deleted file mode 100644 (file)
index 5662fec..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#include <linux/intel-iommu.h>
-
-struct ioapic_scope {
-       struct intel_iommu *iommu;
-       unsigned int id;
-       unsigned int bus;       /* PCI bus number */
-       unsigned int devfn;     /* PCI devfn number */
-};
-
-struct hpet_scope {
-       struct intel_iommu *iommu;
-       u8 id;
-       unsigned int bus;
-       unsigned int devfn;
-};
-
-#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
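
To make IR_X2APIC_MODE() concrete: iommu_set_intr_remapping() above composes the
DMAR_IRTA_REG value from the physical base of the IRTE table, this bit 11 (which
selects extended, i.e. x2APIC, interrupt mode), and the encoded table size. A
rough sketch, assuming the INTR_REMAP_* constants from <linux/intel-iommu.h> and
this header:

#include <linux/intel-iommu.h>
#include <linux/io.h>
#include "intr_remapping.h"

/* Illustrative only: the value written to the interrupt remapping table address register. */
static u64 example_irta_value(struct intel_iommu *iommu, int x2apic_mode)
{
        return virt_to_phys(iommu->ir_table->base) |
               IR_X2APIC_MODE(x2apic_mode) | INTR_REMAP_TABLE_REG_SIZE;
}
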
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
deleted file mode 100644 (file)
index c5c274a..0000000
+++ /dev/null
@@ -1,435 +0,0 @@
-/*
- * Copyright © 2006-2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- */
-
-#include <linux/iova.h>
-
-void
-init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
-{
-       spin_lock_init(&iovad->iova_rbtree_lock);
-       iovad->rbroot = RB_ROOT;
-       iovad->cached32_node = NULL;
-       iovad->dma_32bit_pfn = pfn_32bit;
-}
-
-static struct rb_node *
-__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
-{
-       if ((*limit_pfn != iovad->dma_32bit_pfn) ||
-               (iovad->cached32_node == NULL))
-               return rb_last(&iovad->rbroot);
-       else {
-               struct rb_node *prev_node = rb_prev(iovad->cached32_node);
-               struct iova *curr_iova =
-                       container_of(iovad->cached32_node, struct iova, node);
-               *limit_pfn = curr_iova->pfn_lo - 1;
-               return prev_node;
-       }
-}
-
-static void
-__cached_rbnode_insert_update(struct iova_domain *iovad,
-       unsigned long limit_pfn, struct iova *new)
-{
-       if (limit_pfn != iovad->dma_32bit_pfn)
-               return;
-       iovad->cached32_node = &new->node;
-}
-
-static void
-__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
-{
-       struct iova *cached_iova;
-       struct rb_node *curr;
-
-       if (!iovad->cached32_node)
-               return;
-       curr = iovad->cached32_node;
-       cached_iova = container_of(curr, struct iova, node);
-
-       if (free->pfn_lo >= cached_iova->pfn_lo) {
-               struct rb_node *node = rb_next(&free->node);
-               struct iova *iova = container_of(node, struct iova, node);
-
-               /* only cache if it's below 32bit pfn */
-               if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
-                       iovad->cached32_node = node;
-               else
-                       iovad->cached32_node = NULL;
-       }
-}
-
-/* Computes the padding size required to make the
- * start address naturally aligned on its size
- */
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
-{
-       unsigned int pad_size = 0;
-       unsigned int order = ilog2(size);
-
-       if (order)
-               pad_size = (limit_pfn + 1) % (1 << order);
-
-       return pad_size;
-}
-
-static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
-               unsigned long size, unsigned long limit_pfn,
-                       struct iova *new, bool size_aligned)
-{
-       struct rb_node *prev, *curr = NULL;
-       unsigned long flags;
-       unsigned long saved_pfn;
-       unsigned int pad_size = 0;
-
-       /* Walk the tree backwards */
-       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-       saved_pfn = limit_pfn;
-       curr = __get_cached_rbnode(iovad, &limit_pfn);
-       prev = curr;
-       while (curr) {
-               struct iova *curr_iova = container_of(curr, struct iova, node);
-
-               if (limit_pfn < curr_iova->pfn_lo)
-                       goto move_left;
-               else if (limit_pfn < curr_iova->pfn_hi)
-                       goto adjust_limit_pfn;
-               else {
-                       if (size_aligned)
-                               pad_size = iova_get_pad_size(size, limit_pfn);
-                       if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
-                               break;  /* found a free slot */
-               }
-adjust_limit_pfn:
-               limit_pfn = curr_iova->pfn_lo - 1;
-move_left:
-               prev = curr;
-               curr = rb_prev(curr);
-       }
-
-       if (!curr) {
-               if (size_aligned)
-                       pad_size = iova_get_pad_size(size, limit_pfn);
-               if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
-                       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-                       return -ENOMEM;
-               }
-       }
-
-       /* pfn_lo will point to size aligned address if size_aligned is set */
-       new->pfn_lo = limit_pfn - (size + pad_size) + 1;
-       new->pfn_hi = new->pfn_lo + size - 1;
-
-       /* Insert the new_iova into domain rbtree by holding writer lock */
-       /* Add new node and rebalance tree. */
-       {
-               struct rb_node **entry, *parent = NULL;
-
-               /* If we have 'prev', it's a valid place to start the
-                  insertion. Otherwise, start from the root. */
-               if (prev)
-                       entry = &prev;
-               else
-                       entry = &iovad->rbroot.rb_node;
-
-               /* Figure out where to put new node */
-               while (*entry) {
-                       struct iova *this = container_of(*entry,
-                                                       struct iova, node);
-                       parent = *entry;
-
-                       if (new->pfn_lo < this->pfn_lo)
-                               entry = &((*entry)->rb_left);
-                       else if (new->pfn_lo > this->pfn_lo)
-                               entry = &((*entry)->rb_right);
-                       else
-                               BUG(); /* this should not happen */
-               }
-
-               /* Add new node and rebalance tree. */
-               rb_link_node(&new->node, parent, entry);
-               rb_insert_color(&new->node, &iovad->rbroot);
-       }
-       __cached_rbnode_insert_update(iovad, saved_pfn, new);
-
-       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-
-       return 0;
-}
-
-static void
-iova_insert_rbtree(struct rb_root *root, struct iova *iova)
-{
-       struct rb_node **new = &(root->rb_node), *parent = NULL;
-       /* Figure out where to put new node */
-       while (*new) {
-               struct iova *this = container_of(*new, struct iova, node);
-               parent = *new;
-
-               if (iova->pfn_lo < this->pfn_lo)
-                       new = &((*new)->rb_left);
-               else if (iova->pfn_lo > this->pfn_lo)
-                       new = &((*new)->rb_right);
-               else
-                       BUG(); /* this should not happen */
-       }
-       /* Add new node and rebalance tree. */
-       rb_link_node(&iova->node, parent, new);
-       rb_insert_color(&iova->node, root);
-}
-
-/**
- * alloc_iova - allocates an iova
- * @iovad: iova domain in question
- * @size: number of page frames to allocate
- * @limit_pfn: maximum limit address (pfn)
- * @size_aligned: set if a size-aligned address range is required
- * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
- * searching downwards from limit_pfn rather than upwards from IOVA_START_PFN.
- * If the size_aligned flag is set then the allocated address iova->pfn_lo
- * will be naturally aligned on roundup_power_of_two(size).
- */
-struct iova *
-alloc_iova(struct iova_domain *iovad, unsigned long size,
-       unsigned long limit_pfn,
-       bool size_aligned)
-{
-       struct iova *new_iova;
-       int ret;
-
-       new_iova = alloc_iova_mem();
-       if (!new_iova)
-               return NULL;
-
-       /* If size aligned is set then round the size up
-        * to the next power of two.
-        */
-       if (size_aligned)
-               size = __roundup_pow_of_two(size);
-
-       ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
-                       new_iova, size_aligned);
-
-       if (ret) {
-               free_iova_mem(new_iova);
-               return NULL;
-       }
-
-       return new_iova;
-}
-
-/**
- * find_iova - finds an iova for a given pfn
- * @iovad: iova domain in question.
- * @pfn: page frame number
- * This function finds and returns an iova belonging to the
- * given domain which matches the given pfn.
- */
-struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
-{
-       unsigned long flags;
-       struct rb_node *node;
-
-       /* Take the lock so that no other thread is manipulating the rbtree */
-       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-       node = iovad->rbroot.rb_node;
-       while (node) {
-               struct iova *iova = container_of(node, struct iova, node);
-
-               /* If pfn falls within iova's range, return iova */
-               if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
-                       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-                       /* We are not holding the lock while this iova
-                        * is referenced by the caller as the same thread
-                        * which called this function also calls __free_iova()
-                        * and it is by design that only one thread can possibly
-                        * reference a particular iova and hence no conflict.
-                        */
-                       return iova;
-               }
-
-               if (pfn < iova->pfn_lo)
-                       node = node->rb_left;
-               else if (pfn > iova->pfn_lo)
-                       node = node->rb_right;
-       }
-
-       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-       return NULL;
-}
-
-/**
- * __free_iova - frees the given iova
- * @iovad: iova domain in question.
- * @iova: iova in question.
- * Frees the given iova belonging to the given domain
- */
-void
-__free_iova(struct iova_domain *iovad, struct iova *iova)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-       __cached_rbnode_delete_update(iovad, iova);
-       rb_erase(&iova->node, &iovad->rbroot);
-       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-       free_iova_mem(iova);
-}
-
-/**
- * free_iova - finds and frees the iova for a given pfn
- * @iovad: iova domain in question.
- * @pfn: pfn that was allocated previously
- * This function finds an iova for a given pfn and then
- * frees the iova from that domain.
- */
-void
-free_iova(struct iova_domain *iovad, unsigned long pfn)
-{
-       struct iova *iova = find_iova(iovad, pfn);
-       if (iova)
-               __free_iova(iovad, iova);
-
-}
-
-/**
- * put_iova_domain - destroys the iova domain
- * @iovad: iova domain in question.
- * All the iovas in that domain are destroyed.
- */
-void put_iova_domain(struct iova_domain *iovad)
-{
-       struct rb_node *node;
-       unsigned long flags;
-
-       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-       node = rb_first(&iovad->rbroot);
-       while (node) {
-               struct iova *iova = container_of(node, struct iova, node);
-               rb_erase(node, &iovad->rbroot);
-               free_iova_mem(iova);
-               node = rb_first(&iovad->rbroot);
-       }
-       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-}
-
-static int
-__is_range_overlap(struct rb_node *node,
-       unsigned long pfn_lo, unsigned long pfn_hi)
-{
-       struct iova *iova = container_of(node, struct iova, node);
-
-       if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
-               return 1;
-       return 0;
-}
-
-static struct iova *
-__insert_new_range(struct iova_domain *iovad,
-       unsigned long pfn_lo, unsigned long pfn_hi)
-{
-       struct iova *iova;
-
-       iova = alloc_iova_mem();
-       if (!iova)
-               return iova;
-
-       iova->pfn_hi = pfn_hi;
-       iova->pfn_lo = pfn_lo;
-       iova_insert_rbtree(&iovad->rbroot, iova);
-       return iova;
-}
-
-static void
-__adjust_overlap_range(struct iova *iova,
-       unsigned long *pfn_lo, unsigned long *pfn_hi)
-{
-       if (*pfn_lo < iova->pfn_lo)
-               iova->pfn_lo = *pfn_lo;
-       if (*pfn_hi > iova->pfn_hi)
-               *pfn_lo = iova->pfn_hi + 1;
-}
-
-/**
- * reserve_iova - reserves an iova in the given range
- * @iovad: iova domain pointer
- * @pfn_lo: lower page frame address
- * @pfn_hi: higher pfn address
- * This function reserves the address range from pfn_lo to pfn_hi so
- * that this range is not dished out as part of alloc_iova.
- */
-struct iova *
-reserve_iova(struct iova_domain *iovad,
-       unsigned long pfn_lo, unsigned long pfn_hi)
-{
-       struct rb_node *node;
-       unsigned long flags;
-       struct iova *iova;
-       unsigned int overlap = 0;
-
-       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-       for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
-               if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
-                       iova = container_of(node, struct iova, node);
-                       __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
-                       if ((pfn_lo >= iova->pfn_lo) &&
-                               (pfn_hi <= iova->pfn_hi))
-                               goto finish;
-                       overlap = 1;
-
-               } else if (overlap)
-                               break;
-       }
-
-       /* We are here either because this is the first reserved node
-        * or we need to insert the remaining non-overlapping addr range
-        */
-       iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
-finish:
-
-       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-       return iova;
-}
-
-/**
- * copy_reserved_iova - copies the reserved iovas between domains
- * @from: source domain to copy from
- * @to: destination domain to copy to
- * This function copies reserved iovas from one domain to
- * the other.
- */
-void
-copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
-{
-       unsigned long flags;
-       struct rb_node *node;
-
-       spin_lock_irqsave(&from->iova_rbtree_lock, flags);
-       for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
-               struct iova *iova = container_of(node, struct iova, node);
-               struct iova *new_iova;
-               new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
-               if (!new_iova)
-                       printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
-                               iova->pfn_lo, iova->pfn_lo);
-       }
-       spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
-}
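
A brief sketch (hypothetical helper names) of how the allocator above is driven by
the DMA-mapping path in intel-iommu.c: the domain is initialized with the 32-bit
pfn limit, platform ranges are reserved up front so they are never handed out, and
per-mapping allocations then come from alloc_iova():

#include <linux/iova.h>
#include <linux/dma-mapping.h>

#define EXAMPLE_IOVA_PFN(addr)  ((addr) >> PAGE_SHIFT)

static void example_setup(struct iova_domain *iovad)
{
        init_iova_domain(iovad, EXAMPLE_IOVA_PFN(DMA_BIT_MASK(32)));
        /* e.g. keep a platform hole out of the allocator; intel-iommu reserves
         * the IOAPIC range and PCI MMIO windows this way */
        reserve_iova(iovad, EXAMPLE_IOVA_PFN(0xfee00000),
                     EXAMPLE_IOVA_PFN(0xfeefffff));
}

/* Illustrative only: grab a size-aligned run of @nrpages below 4GiB. */
static struct iova *example_get_range(struct iova_domain *iovad,
                                      unsigned long nrpages)
{
        return alloc_iova(iovad, nrpages,
                          EXAMPLE_IOVA_PFN(DMA_BIT_MASK(32)), true);
}
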
index 731e20265ace428114b4ba217470fb5868908ad1..b7bf11dd546a9a7d8059075ab8ba5bf5166eedee 100644 (file)
@@ -184,8 +184,6 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
        return NULL;
 }
 
-struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
-
 /* PCI slot sysfs helper code */
 #define to_pci_slot(s) container_of(s, struct pci_slot, kobj)
 
index c446b5ca2d38e0e58b650b217bec0f0f90945dd7..970bfe0941c37061e32f43a3a3773d4d2f562f43 100644 (file)
@@ -1589,5 +1589,16 @@ int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
 int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
                              unsigned int len, const char *kw);
 
+/**
+ * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device
+ * @pdev: the PCI device
+ *
+ * If the device is PCIe, return NULL.
+ * If the device isn't connected to a PCIe bridge (that is, its parent is a
+ * legacy PCI bridge and the bridge is directly connected to bus 0), return
+ * its parent.
+ */
+struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
+
 #endif /* __KERNEL__ */
 #endif /* LINUX_PCI_H */
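
With the declaration now in include/linux/pci.h, callers outside drivers/pci (such
as the relocated VT-d code) can use it directly. A hedged usage sketch with a
hypothetical helper, mirroring the bridge walk that intel-iommu.c and
intr_remapping.c perform above:

#include <linux/pci.h>

/* Illustrative only: report what kind of bridge topology @pdev sits behind. */
static void example_report_upstream(struct pci_dev *pdev)
{
        struct pci_dev *bridge = pci_find_upstream_pcie_bridge(pdev);

        if (!bridge) {
                /* a native PCIe device (or no PCIe bridge upstream) */
                dev_info(&pdev->dev, "no upstream PCIe-to-PCI bridge\n");
                return;
        }

        if (pci_is_pcie(bridge))
                /* behind a PCIe-to-PCI/PCI-X bridge: requests may be re-tagged
                 * with the bridge's secondary bus, aliasing the whole bus */
                dev_info(&pdev->dev, "behind PCIe-to-PCI bridge %s\n",
                         pci_name(bridge));
        else
                /* behind a legacy PCI bridge: requests carry the bridge's
                 * own bus/devfn */
                dev_info(&pdev->dev, "behind legacy PCI bridge %s\n",
                         pci_name(bridge));
}
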