2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
18 * Joerg Roedel <jroedel@suse.de>
21 #define pr_fmt(fmt) "DMAR: " fmt
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/cpu.h>
37 #include <linux/timer.h>
39 #include <linux/iova.h>
40 #include <linux/iommu.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/syscore_ops.h>
43 #include <linux/tboot.h>
44 #include <linux/dmi.h>
45 #include <linux/pci-ats.h>
46 #include <linux/memblock.h>
47 #include <linux/dma-contiguous.h>
48 #include <linux/crash_dump.h>
49 #include <asm/irq_remapping.h>
50 #include <asm/cacheflush.h>
51 #include <asm/iommu.h>
53 #include "irq_remapping.h"
55 #define ROOT_SIZE VTD_PAGE_SIZE
56 #define CONTEXT_SIZE VTD_PAGE_SIZE
58 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
59 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
60 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
61 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
63 #define IOAPIC_RANGE_START (0xfee00000)
64 #define IOAPIC_RANGE_END (0xfeefffff)
65 #define IOVA_START_ADDR (0x1000)
67 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
69 #define MAX_AGAW_WIDTH 64
70 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
72 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
73 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
75 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
76 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
77 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
78 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
79 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
81 /* IO virtual address start page frame number */
82 #define IOVA_START_PFN (1)
84 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
86 /* page table handling */
87 #define LEVEL_STRIDE (9)
88 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
91 * This bitmap is used to advertise the page sizes our hardware support
92 * to the IOMMU core, which will then use this information to split
93 * physically contiguous memory regions it is mapping into page sizes
96 * Traditionally the IOMMU core just handed us the mappings directly,
97 * after making sure the size is an order of a 4KiB page and that the
98 * mapping has natural alignment.
100 * To retain this behavior, we currently advertise that we support
101 * all page sizes that are an order of 4KiB.
103 * If at some point we'd like to utilize the IOMMU core's new behavior,
104 * we could change this to advertise the real page sizes we support.
106 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
108 static inline int agaw_to_level(int agaw)
113 static inline int agaw_to_width(int agaw)
115 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
118 static inline int width_to_agaw(int width)
120 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
123 static inline unsigned int level_to_offset_bits(int level)
125 return (level - 1) * LEVEL_STRIDE;
128 static inline int pfn_level_offset(unsigned long pfn, int level)
130 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
133 static inline unsigned long level_mask(int level)
135 return -1UL << level_to_offset_bits(level);
138 static inline unsigned long level_size(int level)
140 return 1UL << level_to_offset_bits(level);
143 static inline unsigned long align_to_level(unsigned long pfn, int level)
145 return (pfn + level_size(level) - 1) & level_mask(level);
148 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
150 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
153 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
154 are never going to work. */
155 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
157 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
160 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
162 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
164 static inline unsigned long page_to_dma_pfn(struct page *pg)
166 return mm_to_dma_pfn(page_to_pfn(pg));
168 static inline unsigned long virt_to_dma_pfn(void *p)
170 return page_to_dma_pfn(virt_to_page(p));
173 /* global iommu list, set NULL for ignored DMAR units */
174 static struct intel_iommu **g_iommus;
176 static void __init check_tylersburg_isoch(void);
177 static int rwbf_quirk;
180 * set to 1 to panic kernel if can't successfully enable VT-d
181 * (used when kernel is launched w/ TXT)
183 static int force_on = 0;
184 int intel_iommu_tboot_noforce;
189 * 12-63: Context Ptr (12 - (haw-1))
196 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
199 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
202 static phys_addr_t root_entry_lctp(struct root_entry *re)
207 return re->lo & VTD_PAGE_MASK;
211 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
214 static phys_addr_t root_entry_uctp(struct root_entry *re)
219 return re->hi & VTD_PAGE_MASK;
224 * 1: fault processing disable
225 * 2-3: translation type
226 * 12-63: address space root
232 struct context_entry {
237 static inline void context_clear_pasid_enable(struct context_entry *context)
239 context->lo &= ~(1ULL << 11);
242 static inline bool context_pasid_enabled(struct context_entry *context)
244 return !!(context->lo & (1ULL << 11));
247 static inline void context_set_copied(struct context_entry *context)
249 context->hi |= (1ull << 3);
252 static inline bool context_copied(struct context_entry *context)
254 return !!(context->hi & (1ULL << 3));
257 static inline bool __context_present(struct context_entry *context)
259 return (context->lo & 1);
262 static inline bool context_present(struct context_entry *context)
264 return context_pasid_enabled(context) ?
265 __context_present(context) :
266 __context_present(context) && !context_copied(context);
269 static inline void context_set_present(struct context_entry *context)
274 static inline void context_set_fault_enable(struct context_entry *context)
276 context->lo &= (((u64)-1) << 2) | 1;
279 static inline void context_set_translation_type(struct context_entry *context,
282 context->lo &= (((u64)-1) << 4) | 3;
283 context->lo |= (value & 3) << 2;
286 static inline void context_set_address_root(struct context_entry *context,
289 context->lo &= ~VTD_PAGE_MASK;
290 context->lo |= value & VTD_PAGE_MASK;
293 static inline void context_set_address_width(struct context_entry *context,
296 context->hi |= value & 7;
299 static inline void context_set_domain_id(struct context_entry *context,
302 context->hi |= (value & ((1 << 16) - 1)) << 8;
305 static inline int context_domain_id(struct context_entry *c)
307 return((c->hi >> 8) & 0xffff);
310 static inline void context_clear_entry(struct context_entry *context)
323 * 12-63: Host physcial address
329 static inline void dma_clear_pte(struct dma_pte *pte)
334 static inline u64 dma_pte_addr(struct dma_pte *pte)
337 return pte->val & VTD_PAGE_MASK;
339 /* Must have a full atomic 64-bit read */
340 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
344 static inline bool dma_pte_present(struct dma_pte *pte)
346 return (pte->val & 3) != 0;
349 static inline bool dma_pte_superpage(struct dma_pte *pte)
351 return (pte->val & DMA_PTE_LARGE_PAGE);
354 static inline int first_pte_in_page(struct dma_pte *pte)
356 return !((unsigned long)pte & ~VTD_PAGE_MASK);
360 * This domain is a statically identity mapping domain.
361 * 1. This domain creats a static 1:1 mapping to all usable memory.
362 * 2. It maps to each iommu if successful.
363 * 3. Each iommu mapps to this domain if successful.
365 static struct dmar_domain *si_domain;
366 static int hw_pass_through = 1;
369 * Domain represents a virtual machine, more than one devices
370 * across iommus may be owned in one domain, e.g. kvm guest.
372 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
374 /* si_domain contains mulitple devices */
375 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
377 #define for_each_domain_iommu(idx, domain) \
378 for (idx = 0; idx < g_num_of_iommus; idx++) \
379 if (domain->iommu_refcnt[idx])
382 int nid; /* node id */
384 unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
385 /* Refcount of devices per iommu */
388 u16 iommu_did[DMAR_UNITS_SUPPORTED];
389 /* Domain ids per IOMMU. Use u16 since
390 * domain ids are 16 bit wide according
391 * to VT-d spec, section 9.3 */
393 bool has_iotlb_device;
394 struct list_head devices; /* all devices' list */
395 struct iova_domain iovad; /* iova's that belong to this domain */
397 struct dma_pte *pgd; /* virtual address */
398 int gaw; /* max guest address width */
400 /* adjusted guest address width, 0 is level 2 30-bit */
403 int flags; /* flags to find out type of domain */
405 int iommu_coherency;/* indicate coherency of iommu access */
406 int iommu_snooping; /* indicate snooping control feature*/
407 int iommu_count; /* reference count of iommu */
408 int iommu_superpage;/* Level of superpages supported:
409 0 == 4KiB (no superpages), 1 == 2MiB,
410 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
411 u64 max_addr; /* maximum mapped address */
413 struct iommu_domain domain; /* generic domain data structure for
417 /* PCI domain-device relationship */
418 struct device_domain_info {
419 struct list_head link; /* link to domain siblings */
420 struct list_head global; /* link to global list */
421 u8 bus; /* PCI bus number */
422 u8 devfn; /* PCI devfn number */
423 u8 pasid_supported:3;
430 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
431 struct intel_iommu *iommu; /* IOMMU used by this device */
432 struct dmar_domain *domain; /* pointer to domain */
435 struct dmar_rmrr_unit {
436 struct list_head list; /* list of rmrr units */
437 struct acpi_dmar_header *hdr; /* ACPI header */
438 u64 base_address; /* reserved base address*/
439 u64 end_address; /* reserved end address */
440 struct dmar_dev_scope *devices; /* target devices */
441 int devices_cnt; /* target device count */
442 struct iommu_resv_region *resv; /* reserved region handle */
445 struct dmar_atsr_unit {
446 struct list_head list; /* list of ATSR units */
447 struct acpi_dmar_header *hdr; /* ACPI header */
448 struct dmar_dev_scope *devices; /* target devices */
449 int devices_cnt; /* target device count */
450 u8 include_all:1; /* include all ports */
453 static LIST_HEAD(dmar_atsr_units);
454 static LIST_HEAD(dmar_rmrr_units);
456 #define for_each_rmrr_units(rmrr) \
457 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
459 /* bitmap for indexing intel_iommus */
460 static int g_num_of_iommus;
462 static void domain_exit(struct dmar_domain *domain);
463 static void domain_remove_dev_info(struct dmar_domain *domain);
464 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
466 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
467 static void domain_context_clear(struct intel_iommu *iommu,
469 static int domain_detach_iommu(struct dmar_domain *domain,
470 struct intel_iommu *iommu);
472 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
473 int dmar_disabled = 0;
475 int dmar_disabled = 1;
476 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
478 int intel_iommu_enabled = 0;
479 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
481 static int dmar_map_gfx = 1;
482 static int dmar_forcedac;
483 static int intel_iommu_strict;
484 static int intel_iommu_superpage = 1;
485 static int intel_iommu_ecs = 1;
486 static int intel_iommu_pasid28;
487 static int iommu_identity_mapping;
489 #define IDENTMAP_ALL 1
490 #define IDENTMAP_GFX 2
491 #define IDENTMAP_AZALIA 4
493 /* Broadwell and Skylake have broken ECS support — normal so-called "second
494 * level" translation of DMA requests-without-PASID doesn't actually happen
495 * unless you also set the NESTE bit in an extended context-entry. Which of
496 * course means that SVM doesn't work because it's trying to do nested
497 * translation of the physical addresses it finds in the process page tables,
498 * through the IOVA->phys mapping found in the "second level" page tables.
500 * The VT-d specification was retroactively changed to change the definition
501 * of the capability bits and pretend that Broadwell/Skylake never happened...
502 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
503 * for some reason it was the PASID capability bit which was redefined (from
504 * bit 28 on BDW/SKL to bit 40 in future).
506 * So our test for ECS needs to eschew those implementations which set the old
507 * PASID capabiity bit 28, since those are the ones on which ECS is broken.
508 * Unless we are working around the 'pasid28' limitations, that is, by putting
509 * the device into passthrough mode for normal DMA and thus masking the bug.
511 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
512 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
513 /* PASID support is thus enabled if ECS is enabled and *either* of the old
514 * or new capability bits are set. */
515 #define pasid_enabled(iommu) (ecs_enabled(iommu) && \
516 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
518 int intel_iommu_gfx_mapped;
519 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
521 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
522 static DEFINE_SPINLOCK(device_domain_lock);
523 static LIST_HEAD(device_domain_list);
525 const struct iommu_ops intel_iommu_ops;
527 static bool translation_pre_enabled(struct intel_iommu *iommu)
529 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
532 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
534 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
537 static void init_translation_status(struct intel_iommu *iommu)
541 gsts = readl(iommu->reg + DMAR_GSTS_REG);
542 if (gsts & DMA_GSTS_TES)
543 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
546 /* Convert generic 'struct iommu_domain to private struct dmar_domain */
547 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
549 return container_of(dom, struct dmar_domain, domain);
552 static int __init intel_iommu_setup(char *str)
557 if (!strncmp(str, "on", 2)) {
559 pr_info("IOMMU enabled\n");
560 } else if (!strncmp(str, "off", 3)) {
562 pr_info("IOMMU disabled\n");
563 } else if (!strncmp(str, "igfx_off", 8)) {
565 pr_info("Disable GFX device mapping\n");
566 } else if (!strncmp(str, "forcedac", 8)) {
567 pr_info("Forcing DAC for PCI devices\n");
569 } else if (!strncmp(str, "strict", 6)) {
570 pr_info("Disable batched IOTLB flush\n");
571 intel_iommu_strict = 1;
572 } else if (!strncmp(str, "sp_off", 6)) {
573 pr_info("Disable supported super page\n");
574 intel_iommu_superpage = 0;
575 } else if (!strncmp(str, "ecs_off", 7)) {
577 "Intel-IOMMU: disable extended context table support\n");
579 } else if (!strncmp(str, "pasid28", 7)) {
581 "Intel-IOMMU: enable pre-production PASID support\n");
582 intel_iommu_pasid28 = 1;
583 iommu_identity_mapping |= IDENTMAP_GFX;
584 } else if (!strncmp(str, "tboot_noforce", 13)) {
586 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
587 intel_iommu_tboot_noforce = 1;
590 str += strcspn(str, ",");
596 __setup("intel_iommu=", intel_iommu_setup);
598 static struct kmem_cache *iommu_domain_cache;
599 static struct kmem_cache *iommu_devinfo_cache;
601 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
603 struct dmar_domain **domains;
606 domains = iommu->domains[idx];
610 return domains[did & 0xff];
613 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
614 struct dmar_domain *domain)
616 struct dmar_domain **domains;
619 if (!iommu->domains[idx]) {
620 size_t size = 256 * sizeof(struct dmar_domain *);
621 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
624 domains = iommu->domains[idx];
625 if (WARN_ON(!domains))
628 domains[did & 0xff] = domain;
631 static inline void *alloc_pgtable_page(int node)
636 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
638 vaddr = page_address(page);
642 static inline void free_pgtable_page(void *vaddr)
644 free_page((unsigned long)vaddr);
647 static inline void *alloc_domain_mem(void)
649 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
652 static void free_domain_mem(void *vaddr)
654 kmem_cache_free(iommu_domain_cache, vaddr);
657 static inline void * alloc_devinfo_mem(void)
659 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
662 static inline void free_devinfo_mem(void *vaddr)
664 kmem_cache_free(iommu_devinfo_cache, vaddr);
667 static inline int domain_type_is_vm(struct dmar_domain *domain)
669 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
672 static inline int domain_type_is_si(struct dmar_domain *domain)
674 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
677 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
679 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
680 DOMAIN_FLAG_STATIC_IDENTITY);
683 static inline int domain_pfn_supported(struct dmar_domain *domain,
686 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
688 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
691 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
696 sagaw = cap_sagaw(iommu->cap);
697 for (agaw = width_to_agaw(max_gaw);
699 if (test_bit(agaw, &sagaw))
707 * Calculate max SAGAW for each iommu.
709 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
711 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
715 * calculate agaw for each iommu.
716 * "SAGAW" may be different across iommus, use a default agaw, and
717 * get a supported less agaw for iommus that don't support the default agaw.
719 int iommu_calculate_agaw(struct intel_iommu *iommu)
721 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
724 /* This functionin only returns single iommu in a domain */
725 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
729 /* si_domain and vm domain should not get here. */
730 BUG_ON(domain_type_is_vm_or_si(domain));
731 for_each_domain_iommu(iommu_id, domain)
734 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
737 return g_iommus[iommu_id];
740 static void domain_update_iommu_coherency(struct dmar_domain *domain)
742 struct dmar_drhd_unit *drhd;
743 struct intel_iommu *iommu;
747 domain->iommu_coherency = 1;
749 for_each_domain_iommu(i, domain) {
751 if (!ecap_coherent(g_iommus[i]->ecap)) {
752 domain->iommu_coherency = 0;
759 /* No hardware attached; use lowest common denominator */
761 for_each_active_iommu(iommu, drhd) {
762 if (!ecap_coherent(iommu->ecap)) {
763 domain->iommu_coherency = 0;
770 static int domain_update_iommu_snooping(struct intel_iommu *skip)
772 struct dmar_drhd_unit *drhd;
773 struct intel_iommu *iommu;
777 for_each_active_iommu(iommu, drhd) {
779 if (!ecap_sc_support(iommu->ecap)) {
790 static int domain_update_iommu_superpage(struct intel_iommu *skip)
792 struct dmar_drhd_unit *drhd;
793 struct intel_iommu *iommu;
796 if (!intel_iommu_superpage) {
800 /* set iommu_superpage to the smallest common denominator */
802 for_each_active_iommu(iommu, drhd) {
804 mask &= cap_super_page_val(iommu->cap);
814 /* Some capabilities may be different across iommus */
815 static void domain_update_iommu_cap(struct dmar_domain *domain)
817 domain_update_iommu_coherency(domain);
818 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
819 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
822 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
823 u8 bus, u8 devfn, int alloc)
825 struct root_entry *root = &iommu->root_entry[bus];
826 struct context_entry *context;
830 if (ecs_enabled(iommu)) {
838 context = phys_to_virt(*entry & VTD_PAGE_MASK);
840 unsigned long phy_addr;
844 context = alloc_pgtable_page(iommu->node);
848 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
849 phy_addr = virt_to_phys((void *)context);
850 *entry = phy_addr | 1;
851 __iommu_flush_cache(iommu, entry, sizeof(*entry));
853 return &context[devfn];
856 static int iommu_dummy(struct device *dev)
858 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
861 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
863 struct dmar_drhd_unit *drhd = NULL;
864 struct intel_iommu *iommu;
866 struct pci_dev *ptmp, *pdev = NULL;
870 if (iommu_dummy(dev))
873 if (dev_is_pci(dev)) {
874 struct pci_dev *pf_pdev;
876 pdev = to_pci_dev(dev);
879 /* VMD child devices currently cannot be handled individually */
880 if (is_vmd(pdev->bus))
884 /* VFs aren't listed in scope tables; we need to look up
885 * the PF instead to find the IOMMU. */
886 pf_pdev = pci_physfn(pdev);
888 segment = pci_domain_nr(pdev->bus);
889 } else if (has_acpi_companion(dev))
890 dev = &ACPI_COMPANION(dev)->dev;
893 for_each_active_iommu(iommu, drhd) {
894 if (pdev && segment != drhd->segment)
897 for_each_active_dev_scope(drhd->devices,
898 drhd->devices_cnt, i, tmp) {
900 /* For a VF use its original BDF# not that of the PF
901 * which we used for the IOMMU lookup. Strictly speaking
902 * we could do this for all PCI devices; we only need to
903 * get the BDF# from the scope table for ACPI matches. */
904 if (pdev && pdev->is_virtfn)
907 *bus = drhd->devices[i].bus;
908 *devfn = drhd->devices[i].devfn;
912 if (!pdev || !dev_is_pci(tmp))
915 ptmp = to_pci_dev(tmp);
916 if (ptmp->subordinate &&
917 ptmp->subordinate->number <= pdev->bus->number &&
918 ptmp->subordinate->busn_res.end >= pdev->bus->number)
922 if (pdev && drhd->include_all) {
924 *bus = pdev->bus->number;
925 *devfn = pdev->devfn;
936 static void domain_flush_cache(struct dmar_domain *domain,
937 void *addr, int size)
939 if (!domain->iommu_coherency)
940 clflush_cache_range(addr, size);
943 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
945 struct context_entry *context;
949 spin_lock_irqsave(&iommu->lock, flags);
950 context = iommu_context_addr(iommu, bus, devfn, 0);
952 ret = context_present(context);
953 spin_unlock_irqrestore(&iommu->lock, flags);
957 static void free_context_table(struct intel_iommu *iommu)
961 struct context_entry *context;
963 spin_lock_irqsave(&iommu->lock, flags);
964 if (!iommu->root_entry) {
967 for (i = 0; i < ROOT_ENTRY_NR; i++) {
968 context = iommu_context_addr(iommu, i, 0, 0);
970 free_pgtable_page(context);
972 if (!ecs_enabled(iommu))
975 context = iommu_context_addr(iommu, i, 0x80, 0);
977 free_pgtable_page(context);
980 free_pgtable_page(iommu->root_entry);
981 iommu->root_entry = NULL;
983 spin_unlock_irqrestore(&iommu->lock, flags);
986 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
987 unsigned long pfn, int *target_level)
989 struct dma_pte *parent, *pte = NULL;
990 int level = agaw_to_level(domain->agaw);
993 BUG_ON(!domain->pgd);
995 if (!domain_pfn_supported(domain, pfn))
996 /* Address beyond IOMMU's addressing capabilities. */
999 parent = domain->pgd;
1004 offset = pfn_level_offset(pfn, level);
1005 pte = &parent[offset];
1006 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
1008 if (level == *target_level)
1011 if (!dma_pte_present(pte)) {
1014 tmp_page = alloc_pgtable_page(domain->nid);
1019 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1020 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1021 if (cmpxchg64(&pte->val, 0ULL, pteval))
1022 /* Someone else set it while we were thinking; use theirs. */
1023 free_pgtable_page(tmp_page);
1025 domain_flush_cache(domain, pte, sizeof(*pte));
1030 parent = phys_to_virt(dma_pte_addr(pte));
1035 *target_level = level;
1041 /* return address's pte at specific level */
1042 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1044 int level, int *large_page)
1046 struct dma_pte *parent, *pte = NULL;
1047 int total = agaw_to_level(domain->agaw);
1050 parent = domain->pgd;
1051 while (level <= total) {
1052 offset = pfn_level_offset(pfn, total);
1053 pte = &parent[offset];
1057 if (!dma_pte_present(pte)) {
1058 *large_page = total;
1062 if (dma_pte_superpage(pte)) {
1063 *large_page = total;
1067 parent = phys_to_virt(dma_pte_addr(pte));
1073 /* clear last level pte, a tlb flush should be followed */
1074 static void dma_pte_clear_range(struct dmar_domain *domain,
1075 unsigned long start_pfn,
1076 unsigned long last_pfn)
1078 unsigned int large_page = 1;
1079 struct dma_pte *first_pte, *pte;
1081 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1082 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1083 BUG_ON(start_pfn > last_pfn);
1085 /* we don't need lock here; nobody else touches the iova range */
1088 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1090 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1095 start_pfn += lvl_to_nr_pages(large_page);
1097 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1099 domain_flush_cache(domain, first_pte,
1100 (void *)pte - (void *)first_pte);
1102 } while (start_pfn && start_pfn <= last_pfn);
1105 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1106 int retain_level, struct dma_pte *pte,
1107 unsigned long pfn, unsigned long start_pfn,
1108 unsigned long last_pfn)
1110 pfn = max(start_pfn, pfn);
1111 pte = &pte[pfn_level_offset(pfn, level)];
1114 unsigned long level_pfn;
1115 struct dma_pte *level_pte;
1117 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1120 level_pfn = pfn & level_mask(level);
1121 level_pte = phys_to_virt(dma_pte_addr(pte));
1124 dma_pte_free_level(domain, level - 1, retain_level,
1125 level_pte, level_pfn, start_pfn,
1130 * Free the page table if we're below the level we want to
1131 * retain and the range covers the entire table.
1133 if (level < retain_level && !(start_pfn > level_pfn ||
1134 last_pfn < level_pfn + level_size(level) - 1)) {
1136 domain_flush_cache(domain, pte, sizeof(*pte));
1137 free_pgtable_page(level_pte);
1140 pfn += level_size(level);
1141 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1145 * clear last level (leaf) ptes and free page table pages below the
1146 * level we wish to keep intact.
1148 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1149 unsigned long start_pfn,
1150 unsigned long last_pfn,
1153 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1154 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1155 BUG_ON(start_pfn > last_pfn);
1157 dma_pte_clear_range(domain, start_pfn, last_pfn);
1159 /* We don't need lock here; nobody else touches the iova range */
1160 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1161 domain->pgd, 0, start_pfn, last_pfn);
1164 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1165 free_pgtable_page(domain->pgd);
1170 /* When a page at a given level is being unlinked from its parent, we don't
1171 need to *modify* it at all. All we need to do is make a list of all the
1172 pages which can be freed just as soon as we've flushed the IOTLB and we
1173 know the hardware page-walk will no longer touch them.
1174 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1176 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1177 int level, struct dma_pte *pte,
1178 struct page *freelist)
1182 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1183 pg->freelist = freelist;
1189 pte = page_address(pg);
1191 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1192 freelist = dma_pte_list_pagetables(domain, level - 1,
1195 } while (!first_pte_in_page(pte));
1200 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1201 struct dma_pte *pte, unsigned long pfn,
1202 unsigned long start_pfn,
1203 unsigned long last_pfn,
1204 struct page *freelist)
1206 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1208 pfn = max(start_pfn, pfn);
1209 pte = &pte[pfn_level_offset(pfn, level)];
1212 unsigned long level_pfn;
1214 if (!dma_pte_present(pte))
1217 level_pfn = pfn & level_mask(level);
1219 /* If range covers entire pagetable, free it */
1220 if (start_pfn <= level_pfn &&
1221 last_pfn >= level_pfn + level_size(level) - 1) {
1222 /* These suborbinate page tables are going away entirely. Don't
1223 bother to clear them; we're just going to *free* them. */
1224 if (level > 1 && !dma_pte_superpage(pte))
1225 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1231 } else if (level > 1) {
1232 /* Recurse down into a level that isn't *entirely* obsolete */
1233 freelist = dma_pte_clear_level(domain, level - 1,
1234 phys_to_virt(dma_pte_addr(pte)),
1235 level_pfn, start_pfn, last_pfn,
1239 pfn += level_size(level);
1240 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1243 domain_flush_cache(domain, first_pte,
1244 (void *)++last_pte - (void *)first_pte);
1249 /* We can't just free the pages because the IOMMU may still be walking
1250 the page tables, and may have cached the intermediate levels. The
1251 pages can only be freed after the IOTLB flush has been done. */
1252 static struct page *domain_unmap(struct dmar_domain *domain,
1253 unsigned long start_pfn,
1254 unsigned long last_pfn)
1256 struct page *freelist = NULL;
1258 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1259 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1260 BUG_ON(start_pfn > last_pfn);
1262 /* we don't need lock here; nobody else touches the iova range */
1263 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1264 domain->pgd, 0, start_pfn, last_pfn, NULL);
1267 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1268 struct page *pgd_page = virt_to_page(domain->pgd);
1269 pgd_page->freelist = freelist;
1270 freelist = pgd_page;
1278 static void dma_free_pagelist(struct page *freelist)
1282 while ((pg = freelist)) {
1283 freelist = pg->freelist;
1284 free_pgtable_page(page_address(pg));
1288 static void iova_entry_free(unsigned long data)
1290 struct page *freelist = (struct page *)data;
1292 dma_free_pagelist(freelist);
1295 /* iommu handling */
1296 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1298 struct root_entry *root;
1299 unsigned long flags;
1301 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1303 pr_err("Allocating root entry for %s failed\n",
1308 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1310 spin_lock_irqsave(&iommu->lock, flags);
1311 iommu->root_entry = root;
1312 spin_unlock_irqrestore(&iommu->lock, flags);
1317 static void iommu_set_root_entry(struct intel_iommu *iommu)
1323 addr = virt_to_phys(iommu->root_entry);
1324 if (ecs_enabled(iommu))
1325 addr |= DMA_RTADDR_RTT;
1327 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1328 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1330 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1332 /* Make sure hardware complete it */
1333 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1334 readl, (sts & DMA_GSTS_RTPS), sts);
1336 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1339 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1344 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1347 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1348 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1350 /* Make sure hardware complete it */
1351 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1352 readl, (!(val & DMA_GSTS_WBFS)), val);
1354 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1357 /* return value determine if we need a write buffer flush */
1358 static void __iommu_flush_context(struct intel_iommu *iommu,
1359 u16 did, u16 source_id, u8 function_mask,
1366 case DMA_CCMD_GLOBAL_INVL:
1367 val = DMA_CCMD_GLOBAL_INVL;
1369 case DMA_CCMD_DOMAIN_INVL:
1370 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1372 case DMA_CCMD_DEVICE_INVL:
1373 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1374 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1379 val |= DMA_CCMD_ICC;
1381 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1382 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1384 /* Make sure hardware complete it */
1385 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1386 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1388 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1391 /* return value determine if we need a write buffer flush */
1392 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1393 u64 addr, unsigned int size_order, u64 type)
1395 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1396 u64 val = 0, val_iva = 0;
1400 case DMA_TLB_GLOBAL_FLUSH:
1401 /* global flush doesn't need set IVA_REG */
1402 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1404 case DMA_TLB_DSI_FLUSH:
1405 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1407 case DMA_TLB_PSI_FLUSH:
1408 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1409 /* IH bit is passed in as part of address */
1410 val_iva = size_order | addr;
1415 /* Note: set drain read/write */
1418 * This is probably to be super secure.. Looks like we can
1419 * ignore it without any impact.
1421 if (cap_read_drain(iommu->cap))
1422 val |= DMA_TLB_READ_DRAIN;
1424 if (cap_write_drain(iommu->cap))
1425 val |= DMA_TLB_WRITE_DRAIN;
1427 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1428 /* Note: Only uses first TLB reg currently */
1430 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1431 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1433 /* Make sure hardware complete it */
1434 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1435 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1437 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1439 /* check IOTLB invalidation granularity */
1440 if (DMA_TLB_IAIG(val) == 0)
1441 pr_err("Flush IOTLB failed\n");
1442 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1443 pr_debug("TLB flush request %Lx, actual %Lx\n",
1444 (unsigned long long)DMA_TLB_IIRG(type),
1445 (unsigned long long)DMA_TLB_IAIG(val));
1448 static struct device_domain_info *
1449 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1452 struct device_domain_info *info;
1454 assert_spin_locked(&device_domain_lock);
1459 list_for_each_entry(info, &domain->devices, link)
1460 if (info->iommu == iommu && info->bus == bus &&
1461 info->devfn == devfn) {
1462 if (info->ats_supported && info->dev)
1470 static void domain_update_iotlb(struct dmar_domain *domain)
1472 struct device_domain_info *info;
1473 bool has_iotlb_device = false;
1475 assert_spin_locked(&device_domain_lock);
1477 list_for_each_entry(info, &domain->devices, link) {
1478 struct pci_dev *pdev;
1480 if (!info->dev || !dev_is_pci(info->dev))
1483 pdev = to_pci_dev(info->dev);
1484 if (pdev->ats_enabled) {
1485 has_iotlb_device = true;
1490 domain->has_iotlb_device = has_iotlb_device;
1493 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1495 struct pci_dev *pdev;
1497 assert_spin_locked(&device_domain_lock);
1499 if (!info || !dev_is_pci(info->dev))
1502 pdev = to_pci_dev(info->dev);
1504 #ifdef CONFIG_INTEL_IOMMU_SVM
1505 /* The PCIe spec, in its wisdom, declares that the behaviour of
1506 the device if you enable PASID support after ATS support is
1507 undefined. So always enable PASID support on devices which
1508 have it, even if we can't yet know if we're ever going to
1510 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1511 info->pasid_enabled = 1;
1513 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1514 info->pri_enabled = 1;
1516 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1517 info->ats_enabled = 1;
1518 domain_update_iotlb(info->domain);
1519 info->ats_qdep = pci_ats_queue_depth(pdev);
1523 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1525 struct pci_dev *pdev;
1527 assert_spin_locked(&device_domain_lock);
1529 if (!dev_is_pci(info->dev))
1532 pdev = to_pci_dev(info->dev);
1534 if (info->ats_enabled) {
1535 pci_disable_ats(pdev);
1536 info->ats_enabled = 0;
1537 domain_update_iotlb(info->domain);
1539 #ifdef CONFIG_INTEL_IOMMU_SVM
1540 if (info->pri_enabled) {
1541 pci_disable_pri(pdev);
1542 info->pri_enabled = 0;
1544 if (info->pasid_enabled) {
1545 pci_disable_pasid(pdev);
1546 info->pasid_enabled = 0;
1551 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1552 u64 addr, unsigned mask)
1555 unsigned long flags;
1556 struct device_domain_info *info;
1558 if (!domain->has_iotlb_device)
1561 spin_lock_irqsave(&device_domain_lock, flags);
1562 list_for_each_entry(info, &domain->devices, link) {
1563 if (!info->ats_enabled)
1566 sid = info->bus << 8 | info->devfn;
1567 qdep = info->ats_qdep;
1568 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1570 spin_unlock_irqrestore(&device_domain_lock, flags);
1573 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1574 struct dmar_domain *domain,
1575 unsigned long pfn, unsigned int pages,
1578 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1579 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1580 u16 did = domain->iommu_did[iommu->seq_id];
1587 * Fallback to domain selective flush if no PSI support or the size is
1589 * PSI requires page size to be 2 ^ x, and the base address is naturally
1590 * aligned to the size
1592 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1593 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1596 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1600 * In caching mode, changes of pages from non-present to present require
1601 * flush. However, device IOTLB doesn't need to be flushed in this case.
1603 if (!cap_caching_mode(iommu->cap) || !map)
1604 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1608 static void iommu_flush_iova(struct iova_domain *iovad)
1610 struct dmar_domain *domain;
1613 domain = container_of(iovad, struct dmar_domain, iovad);
1615 for_each_domain_iommu(idx, domain) {
1616 struct intel_iommu *iommu = g_iommus[idx];
1617 u16 did = domain->iommu_did[iommu->seq_id];
1619 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1621 if (!cap_caching_mode(iommu->cap))
1622 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1623 0, MAX_AGAW_PFN_WIDTH);
1627 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1630 unsigned long flags;
1632 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1633 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1634 pmen &= ~DMA_PMEN_EPM;
1635 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1637 /* wait for the protected region status bit to clear */
1638 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1639 readl, !(pmen & DMA_PMEN_PRS), pmen);
1641 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1644 static void iommu_enable_translation(struct intel_iommu *iommu)
1647 unsigned long flags;
1649 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1650 iommu->gcmd |= DMA_GCMD_TE;
1651 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1653 /* Make sure hardware complete it */
1654 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1655 readl, (sts & DMA_GSTS_TES), sts);
1657 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1660 static void iommu_disable_translation(struct intel_iommu *iommu)
1665 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1666 iommu->gcmd &= ~DMA_GCMD_TE;
1667 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1669 /* Make sure hardware complete it */
1670 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1671 readl, (!(sts & DMA_GSTS_TES)), sts);
1673 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1677 static int iommu_init_domains(struct intel_iommu *iommu)
1679 u32 ndomains, nlongs;
1682 ndomains = cap_ndoms(iommu->cap);
1683 pr_debug("%s: Number of Domains supported <%d>\n",
1684 iommu->name, ndomains);
1685 nlongs = BITS_TO_LONGS(ndomains);
1687 spin_lock_init(&iommu->lock);
1689 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1690 if (!iommu->domain_ids) {
1691 pr_err("%s: Allocating domain id array failed\n",
1696 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1697 iommu->domains = kzalloc(size, GFP_KERNEL);
1699 if (iommu->domains) {
1700 size = 256 * sizeof(struct dmar_domain *);
1701 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1704 if (!iommu->domains || !iommu->domains[0]) {
1705 pr_err("%s: Allocating domain array failed\n",
1707 kfree(iommu->domain_ids);
1708 kfree(iommu->domains);
1709 iommu->domain_ids = NULL;
1710 iommu->domains = NULL;
1717 * If Caching mode is set, then invalid translations are tagged
1718 * with domain-id 0, hence we need to pre-allocate it. We also
1719 * use domain-id 0 as a marker for non-allocated domain-id, so
1720 * make sure it is not used for a real domain.
1722 set_bit(0, iommu->domain_ids);
1727 static void disable_dmar_iommu(struct intel_iommu *iommu)
1729 struct device_domain_info *info, *tmp;
1730 unsigned long flags;
1732 if (!iommu->domains || !iommu->domain_ids)
1736 spin_lock_irqsave(&device_domain_lock, flags);
1737 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1738 struct dmar_domain *domain;
1740 if (info->iommu != iommu)
1743 if (!info->dev || !info->domain)
1746 domain = info->domain;
1748 __dmar_remove_one_dev_info(info);
1750 if (!domain_type_is_vm_or_si(domain)) {
1752 * The domain_exit() function can't be called under
1753 * device_domain_lock, as it takes this lock itself.
1754 * So release the lock here and re-run the loop
1757 spin_unlock_irqrestore(&device_domain_lock, flags);
1758 domain_exit(domain);
1762 spin_unlock_irqrestore(&device_domain_lock, flags);
1764 if (iommu->gcmd & DMA_GCMD_TE)
1765 iommu_disable_translation(iommu);
1768 static void free_dmar_iommu(struct intel_iommu *iommu)
1770 if ((iommu->domains) && (iommu->domain_ids)) {
1771 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1774 for (i = 0; i < elems; i++)
1775 kfree(iommu->domains[i]);
1776 kfree(iommu->domains);
1777 kfree(iommu->domain_ids);
1778 iommu->domains = NULL;
1779 iommu->domain_ids = NULL;
1782 g_iommus[iommu->seq_id] = NULL;
1784 /* free context mapping */
1785 free_context_table(iommu);
1787 #ifdef CONFIG_INTEL_IOMMU_SVM
1788 if (pasid_enabled(iommu)) {
1789 if (ecap_prs(iommu->ecap))
1790 intel_svm_finish_prq(iommu);
1791 intel_svm_free_pasid_tables(iommu);
1796 static struct dmar_domain *alloc_domain(int flags)
1798 struct dmar_domain *domain;
1800 domain = alloc_domain_mem();
1804 memset(domain, 0, sizeof(*domain));
1806 domain->flags = flags;
1807 domain->has_iotlb_device = false;
1808 INIT_LIST_HEAD(&domain->devices);
1813 /* Must be called with iommu->lock */
1814 static int domain_attach_iommu(struct dmar_domain *domain,
1815 struct intel_iommu *iommu)
1817 unsigned long ndomains;
1820 assert_spin_locked(&device_domain_lock);
1821 assert_spin_locked(&iommu->lock);
1823 domain->iommu_refcnt[iommu->seq_id] += 1;
1824 domain->iommu_count += 1;
1825 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1826 ndomains = cap_ndoms(iommu->cap);
1827 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1829 if (num >= ndomains) {
1830 pr_err("%s: No free domain ids\n", iommu->name);
1831 domain->iommu_refcnt[iommu->seq_id] -= 1;
1832 domain->iommu_count -= 1;
1836 set_bit(num, iommu->domain_ids);
1837 set_iommu_domain(iommu, num, domain);
1839 domain->iommu_did[iommu->seq_id] = num;
1840 domain->nid = iommu->node;
1842 domain_update_iommu_cap(domain);
1848 static int domain_detach_iommu(struct dmar_domain *domain,
1849 struct intel_iommu *iommu)
1851 int num, count = INT_MAX;
1853 assert_spin_locked(&device_domain_lock);
1854 assert_spin_locked(&iommu->lock);
1856 domain->iommu_refcnt[iommu->seq_id] -= 1;
1857 count = --domain->iommu_count;
1858 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1859 num = domain->iommu_did[iommu->seq_id];
1860 clear_bit(num, iommu->domain_ids);
1861 set_iommu_domain(iommu, num, NULL);
1863 domain_update_iommu_cap(domain);
1864 domain->iommu_did[iommu->seq_id] = 0;
1870 static struct iova_domain reserved_iova_list;
1871 static struct lock_class_key reserved_rbtree_key;
1873 static int dmar_init_reserved_ranges(void)
1875 struct pci_dev *pdev = NULL;
1879 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
1881 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1882 &reserved_rbtree_key);
1884 /* IOAPIC ranges shouldn't be accessed by DMA */
1885 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1886 IOVA_PFN(IOAPIC_RANGE_END));
1888 pr_err("Reserve IOAPIC range failed\n");
1892 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1893 for_each_pci_dev(pdev) {
1896 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1897 r = &pdev->resource[i];
1898 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1900 iova = reserve_iova(&reserved_iova_list,
1904 pr_err("Reserve iova failed\n");
1912 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1914 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1917 static inline int guestwidth_to_adjustwidth(int gaw)
1920 int r = (gaw - 12) % 9;
1931 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1934 int adjust_width, agaw;
1935 unsigned long sagaw;
1938 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
1940 err = init_iova_flush_queue(&domain->iovad,
1941 iommu_flush_iova, iova_entry_free);
1945 domain_reserve_special_ranges(domain);
1947 /* calculate AGAW */
1948 if (guest_width > cap_mgaw(iommu->cap))
1949 guest_width = cap_mgaw(iommu->cap);
1950 domain->gaw = guest_width;
1951 adjust_width = guestwidth_to_adjustwidth(guest_width);
1952 agaw = width_to_agaw(adjust_width);
1953 sagaw = cap_sagaw(iommu->cap);
1954 if (!test_bit(agaw, &sagaw)) {
1955 /* hardware doesn't support it, choose a bigger one */
1956 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1957 agaw = find_next_bit(&sagaw, 5, agaw);
1961 domain->agaw = agaw;
1963 if (ecap_coherent(iommu->ecap))
1964 domain->iommu_coherency = 1;
1966 domain->iommu_coherency = 0;
1968 if (ecap_sc_support(iommu->ecap))
1969 domain->iommu_snooping = 1;
1971 domain->iommu_snooping = 0;
1973 if (intel_iommu_superpage)
1974 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1976 domain->iommu_superpage = 0;
1978 domain->nid = iommu->node;
1980 /* always allocate the top pgd */
1981 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1984 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1988 static void domain_exit(struct dmar_domain *domain)
1990 struct page *freelist = NULL;
1992 /* Domain 0 is reserved, so dont process it */
1996 /* Remove associated devices and clear attached or cached domains */
1998 domain_remove_dev_info(domain);
2002 put_iova_domain(&domain->iovad);
2004 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
2006 dma_free_pagelist(freelist);
2008 free_domain_mem(domain);
2011 static int domain_context_mapping_one(struct dmar_domain *domain,
2012 struct intel_iommu *iommu,
2015 u16 did = domain->iommu_did[iommu->seq_id];
2016 int translation = CONTEXT_TT_MULTI_LEVEL;
2017 struct device_domain_info *info = NULL;
2018 struct context_entry *context;
2019 unsigned long flags;
2020 struct dma_pte *pgd;
2025 if (hw_pass_through && domain_type_is_si(domain))
2026 translation = CONTEXT_TT_PASS_THROUGH;
2028 pr_debug("Set context mapping for %02x:%02x.%d\n",
2029 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2031 BUG_ON(!domain->pgd);
2033 spin_lock_irqsave(&device_domain_lock, flags);
2034 spin_lock(&iommu->lock);
2037 context = iommu_context_addr(iommu, bus, devfn, 1);
2042 if (context_present(context))
2046 * For kdump cases, old valid entries may be cached due to the
2047 * in-flight DMA and copied pgtable, but there is no unmapping
2048 * behaviour for them, thus we need an explicit cache flush for
2049 * the newly-mapped device. For kdump, at this point, the device
2050 * is supposed to finish reset at its driver probe stage, so no
2051 * in-flight DMA will exist, and we don't need to worry anymore
2054 if (context_copied(context)) {
2055 u16 did_old = context_domain_id(context);
2057 if (did_old < cap_ndoms(iommu->cap)) {
2058 iommu->flush.flush_context(iommu, did_old,
2059 (((u16)bus) << 8) | devfn,
2060 DMA_CCMD_MASK_NOBIT,
2061 DMA_CCMD_DEVICE_INVL);
2062 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2069 context_clear_entry(context);
2070 context_set_domain_id(context, did);
2073 * Skip top levels of page tables for iommu which has less agaw
2074 * than default. Unnecessary for PT mode.
2076 if (translation != CONTEXT_TT_PASS_THROUGH) {
2077 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
2079 pgd = phys_to_virt(dma_pte_addr(pgd));
2080 if (!dma_pte_present(pgd))
2084 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2085 if (info && info->ats_supported)
2086 translation = CONTEXT_TT_DEV_IOTLB;
2088 translation = CONTEXT_TT_MULTI_LEVEL;
2090 context_set_address_root(context, virt_to_phys(pgd));
2091 context_set_address_width(context, iommu->agaw);
2094 * In pass through mode, AW must be programmed to
2095 * indicate the largest AGAW value supported by
2096 * hardware. And ASR is ignored by hardware.
2098 context_set_address_width(context, iommu->msagaw);
2101 context_set_translation_type(context, translation);
2102 context_set_fault_enable(context);
2103 context_set_present(context);
2104 domain_flush_cache(domain, context, sizeof(*context));
2107 * It's a non-present to present mapping. If hardware doesn't cache
2108 * non-present entry we only need to flush the write-buffer. If the
2109 * _does_ cache non-present entries, then it does so in the special
2110 * domain #0, which we have to flush:
2112 if (cap_caching_mode(iommu->cap)) {
2113 iommu->flush.flush_context(iommu, 0,
2114 (((u16)bus) << 8) | devfn,
2115 DMA_CCMD_MASK_NOBIT,
2116 DMA_CCMD_DEVICE_INVL);
2117 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2119 iommu_flush_write_buffer(iommu);
2121 iommu_enable_dev_iotlb(info);
2126 spin_unlock(&iommu->lock);
2127 spin_unlock_irqrestore(&device_domain_lock, flags);
2132 struct domain_context_mapping_data {
2133 struct dmar_domain *domain;
2134 struct intel_iommu *iommu;
2137 static int domain_context_mapping_cb(struct pci_dev *pdev,
2138 u16 alias, void *opaque)
2140 struct domain_context_mapping_data *data = opaque;
2142 return domain_context_mapping_one(data->domain, data->iommu,
2143 PCI_BUS_NUM(alias), alias & 0xff);
2147 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2149 struct intel_iommu *iommu;
2151 struct domain_context_mapping_data data;
2153 iommu = device_to_iommu(dev, &bus, &devfn);
2157 if (!dev_is_pci(dev))
2158 return domain_context_mapping_one(domain, iommu, bus, devfn);
2160 data.domain = domain;
2163 return pci_for_each_dma_alias(to_pci_dev(dev),
2164 &domain_context_mapping_cb, &data);
2167 static int domain_context_mapped_cb(struct pci_dev *pdev,
2168 u16 alias, void *opaque)
2170 struct intel_iommu *iommu = opaque;
2172 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2175 static int domain_context_mapped(struct device *dev)
2177 struct intel_iommu *iommu;
2180 iommu = device_to_iommu(dev, &bus, &devfn);
2184 if (!dev_is_pci(dev))
2185 return device_context_mapped(iommu, bus, devfn);
2187 return !pci_for_each_dma_alias(to_pci_dev(dev),
2188 domain_context_mapped_cb, iommu);
2191 /* Returns a number of VTD pages, but aligned to MM page size */
2192 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2195 host_addr &= ~PAGE_MASK;
2196 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2199 /* Return largest possible superpage level for a given mapping */
2200 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2201 unsigned long iov_pfn,
2202 unsigned long phy_pfn,
2203 unsigned long pages)
2205 int support, level = 1;
2206 unsigned long pfnmerge;
2208 support = domain->iommu_superpage;
2210 /* To use a large page, the virtual *and* physical addresses
2211 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2212 of them will mean we have to use smaller pages. So just
2213 merge them and check both at once. */
2214 pfnmerge = iov_pfn | phy_pfn;
2216 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2217 pages >>= VTD_STRIDE_SHIFT;
2220 pfnmerge >>= VTD_STRIDE_SHIFT;
2227 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2228 struct scatterlist *sg, unsigned long phys_pfn,
2229 unsigned long nr_pages, int prot)
2231 struct dma_pte *first_pte = NULL, *pte = NULL;
2232 phys_addr_t uninitialized_var(pteval);
2233 unsigned long sg_res = 0;
2234 unsigned int largepage_lvl = 0;
2235 unsigned long lvl_pages = 0;
2237 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2239 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2242 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2246 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2249 while (nr_pages > 0) {
2253 sg_res = aligned_nrpages(sg->offset, sg->length);
2254 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2255 sg->dma_length = sg->length;
2256 pteval = page_to_phys(sg_page(sg)) | prot;
2257 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2261 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2263 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2266 /* It is large page*/
2267 if (largepage_lvl > 1) {
2268 unsigned long nr_superpages, end_pfn;
2270 pteval |= DMA_PTE_LARGE_PAGE;
2271 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2273 nr_superpages = sg_res / lvl_pages;
2274 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2277 * Ensure that old small page tables are
2278 * removed to make room for superpage(s).
2279 * We're adding new large pages, so make sure
2280 * we don't remove their parent tables.
2282 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2285 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2289 /* We don't need lock here, nobody else
2290 * touches the iova range
2292 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2294 static int dumps = 5;
2295 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2296 iov_pfn, tmp, (unsigned long long)pteval);
2299 debug_dma_dump_mappings(NULL);
2304 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2306 BUG_ON(nr_pages < lvl_pages);
2307 BUG_ON(sg_res < lvl_pages);
2309 nr_pages -= lvl_pages;
2310 iov_pfn += lvl_pages;
2311 phys_pfn += lvl_pages;
2312 pteval += lvl_pages * VTD_PAGE_SIZE;
2313 sg_res -= lvl_pages;
2315 /* If the next PTE would be the first in a new page, then we
2316 need to flush the cache on the entries we've just written.
2317 And then we'll need to recalculate 'pte', so clear it and
2318 let it get set again in the if (!pte) block above.
2320 If we're done (!nr_pages) we need to flush the cache too.
2322 Also if we've been setting superpages, we may need to
2323 recalculate 'pte' and switch back to smaller pages for the
2324 end of the mapping, if the trailing size is not enough to
2325 use another superpage (i.e. sg_res < lvl_pages). */
2327 if (!nr_pages || first_pte_in_page(pte) ||
2328 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2329 domain_flush_cache(domain, first_pte,
2330 (void *)pte - (void *)first_pte);
2334 if (!sg_res && nr_pages)
2340 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2341 struct scatterlist *sg, unsigned long nr_pages,
2344 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2347 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2348 unsigned long phys_pfn, unsigned long nr_pages,
2351 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2354 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2356 unsigned long flags;
2357 struct context_entry *context;
2363 spin_lock_irqsave(&iommu->lock, flags);
2364 context = iommu_context_addr(iommu, bus, devfn, 0);
2366 spin_unlock_irqrestore(&iommu->lock, flags);
2369 did_old = context_domain_id(context);
2370 context_clear_entry(context);
2371 __iommu_flush_cache(iommu, context, sizeof(*context));
2372 spin_unlock_irqrestore(&iommu->lock, flags);
2373 iommu->flush.flush_context(iommu,
2375 (((u16)bus) << 8) | devfn,
2376 DMA_CCMD_MASK_NOBIT,
2377 DMA_CCMD_DEVICE_INVL);
2378 iommu->flush.flush_iotlb(iommu,
2385 static inline void unlink_domain_info(struct device_domain_info *info)
2387 assert_spin_locked(&device_domain_lock);
2388 list_del(&info->link);
2389 list_del(&info->global);
2391 info->dev->archdata.iommu = NULL;
2394 static void domain_remove_dev_info(struct dmar_domain *domain)
2396 struct device_domain_info *info, *tmp;
2397 unsigned long flags;
2399 spin_lock_irqsave(&device_domain_lock, flags);
2400 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2401 __dmar_remove_one_dev_info(info);
2402 spin_unlock_irqrestore(&device_domain_lock, flags);
2407 * Note: we use struct device->archdata.iommu to store the info
2409 static struct dmar_domain *find_domain(struct device *dev)
2411 struct device_domain_info *info;
2413 /* No lock here, assumes no domain exit in normal case */
2414 info = dev->archdata.iommu;
2416 return info->domain;
2420 static inline struct device_domain_info *
2421 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2423 struct device_domain_info *info;
2425 list_for_each_entry(info, &device_domain_list, global)
2426 if (info->iommu->segment == segment && info->bus == bus &&
2427 info->devfn == devfn)
2433 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2436 struct dmar_domain *domain)
2438 struct dmar_domain *found = NULL;
2439 struct device_domain_info *info;
2440 unsigned long flags;
2443 info = alloc_devinfo_mem();
2448 info->devfn = devfn;
2449 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2450 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2453 info->domain = domain;
2454 info->iommu = iommu;
2456 if (dev && dev_is_pci(dev)) {
2457 struct pci_dev *pdev = to_pci_dev(info->dev);
2459 if (ecap_dev_iotlb_support(iommu->ecap) &&
2460 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2461 dmar_find_matched_atsr_unit(pdev))
2462 info->ats_supported = 1;
2464 if (ecs_enabled(iommu)) {
2465 if (pasid_enabled(iommu)) {
2466 int features = pci_pasid_features(pdev);
2468 info->pasid_supported = features | 1;
2471 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2472 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2473 info->pri_supported = 1;
2477 spin_lock_irqsave(&device_domain_lock, flags);
2479 found = find_domain(dev);
2482 struct device_domain_info *info2;
2483 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2485 found = info2->domain;
2491 spin_unlock_irqrestore(&device_domain_lock, flags);
2492 free_devinfo_mem(info);
2493 /* Caller must free the original domain */
2497 spin_lock(&iommu->lock);
2498 ret = domain_attach_iommu(domain, iommu);
2499 spin_unlock(&iommu->lock);
2502 spin_unlock_irqrestore(&device_domain_lock, flags);
2503 free_devinfo_mem(info);
2507 list_add(&info->link, &domain->devices);
2508 list_add(&info->global, &device_domain_list);
2510 dev->archdata.iommu = info;
2511 spin_unlock_irqrestore(&device_domain_lock, flags);
2513 if (dev && domain_context_mapping(domain, dev)) {
2514 pr_err("Domain context map for %s failed\n", dev_name(dev));
2515 dmar_remove_one_dev_info(domain, dev);
2522 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2524 *(u16 *)opaque = alias;
2528 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2530 struct device_domain_info *info = NULL;
2531 struct dmar_domain *domain = NULL;
2532 struct intel_iommu *iommu;
2533 u16 req_id, dma_alias;
2534 unsigned long flags;
2537 iommu = device_to_iommu(dev, &bus, &devfn);
2541 req_id = ((u16)bus << 8) | devfn;
2543 if (dev_is_pci(dev)) {
2544 struct pci_dev *pdev = to_pci_dev(dev);
2546 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2548 spin_lock_irqsave(&device_domain_lock, flags);
2549 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2550 PCI_BUS_NUM(dma_alias),
2553 iommu = info->iommu;
2554 domain = info->domain;
2556 spin_unlock_irqrestore(&device_domain_lock, flags);
2558 /* DMA alias already has a domain, use it */
2563 /* Allocate and initialize new domain for the device */
2564 domain = alloc_domain(0);
2567 if (domain_init(domain, iommu, gaw)) {
2568 domain_exit(domain);
2577 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2578 struct dmar_domain *domain)
2580 struct intel_iommu *iommu;
2581 struct dmar_domain *tmp;
2582 u16 req_id, dma_alias;
2585 iommu = device_to_iommu(dev, &bus, &devfn);
2589 req_id = ((u16)bus << 8) | devfn;
2591 if (dev_is_pci(dev)) {
2592 struct pci_dev *pdev = to_pci_dev(dev);
2594 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2596 /* register PCI DMA alias device */
2597 if (req_id != dma_alias) {
2598 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2599 dma_alias & 0xff, NULL, domain);
2601 if (!tmp || tmp != domain)
2606 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2607 if (!tmp || tmp != domain)
2613 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2615 struct dmar_domain *domain, *tmp;
2617 domain = find_domain(dev);
2621 domain = find_or_alloc_domain(dev, gaw);
2625 tmp = set_domain_for_dev(dev, domain);
2626 if (!tmp || domain != tmp) {
2627 domain_exit(domain);
2636 static int iommu_domain_identity_map(struct dmar_domain *domain,
2637 unsigned long long start,
2638 unsigned long long end)
2640 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2641 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2643 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2644 dma_to_mm_pfn(last_vpfn))) {
2645 pr_err("Reserving iova failed\n");
2649 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2651 * RMRR range might overlap with physical memory; clear that range first.
2654 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2656 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2657 last_vpfn - first_vpfn + 1,
2658 DMA_PTE_READ|DMA_PTE_WRITE);
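/*
 * Illustrative sketch (hypothetical helper, not in the driver):
 * identity-mapping one physical range with iommu_domain_identity_map()
 * above, mirroring what si_domain_init() does for each memblock region
 * below. The 16MiB range at 1GiB is made up.
 */
static int __maybe_unused example_identity_map(struct dmar_domain *domain)
{
	unsigned long long start = 1ULL << 30;
	unsigned long long end = start + (16ULL << 20) - 1;

	/* iova == phys for the whole range, read/write */
	return iommu_domain_identity_map(domain, start, end);
}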
2661 static int domain_prepare_identity_map(struct device *dev,
2662 struct dmar_domain *domain,
2663 unsigned long long start,
2664 unsigned long long end)
2666 /* For _hardware_ passthrough, don't bother. But for software
2667 passthrough, we do it anyway -- it may indicate a memory
2668 range which is reserved in E820 and therefore didn't get
2669 set up to start with in si_domain */
2670 if (domain == si_domain && hw_pass_through) {
2671 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2672 dev_name(dev), start, end);
2676 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2677 dev_name(dev), start, end);
2680 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2681 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2682 dmi_get_system_info(DMI_BIOS_VENDOR),
2683 dmi_get_system_info(DMI_BIOS_VERSION),
2684 dmi_get_system_info(DMI_PRODUCT_VERSION));
2688 if (end >> agaw_to_width(domain->agaw)) {
2689 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2690 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2691 agaw_to_width(domain->agaw),
2692 dmi_get_system_info(DMI_BIOS_VENDOR),
2693 dmi_get_system_info(DMI_BIOS_VERSION),
2694 dmi_get_system_info(DMI_PRODUCT_VERSION));
2698 return iommu_domain_identity_map(domain, start, end);
2701 static int iommu_prepare_identity_map(struct device *dev,
2702 unsigned long long start,
2703 unsigned long long end)
2705 struct dmar_domain *domain;
2708 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2712 ret = domain_prepare_identity_map(dev, domain, start, end);
2714 domain_exit(domain);
2719 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2722 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2724 return iommu_prepare_identity_map(dev, rmrr->base_address,
2728 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2729 static inline void iommu_prepare_isa(void)
2731 struct pci_dev *pdev;
2734 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2738 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2739 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2742 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2747 static inline void iommu_prepare_isa(void)
2751 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2753 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2755 static int __init si_domain_init(int hw)
2759 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2763 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2764 domain_exit(si_domain);
2768 pr_debug("Identity mapping domain allocated\n");
2773 for_each_online_node(nid) {
2774 unsigned long start_pfn, end_pfn;
2777 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2778 ret = iommu_domain_identity_map(si_domain,
2779 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2788 static int identity_mapping(struct device *dev)
2790 struct device_domain_info *info;
2792 if (likely(!iommu_identity_mapping))
2795 info = dev->archdata.iommu;
2796 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2797 return (info->domain == si_domain);
2802 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2804 struct dmar_domain *ndomain;
2805 struct intel_iommu *iommu;
2808 iommu = device_to_iommu(dev, &bus, &devfn);
2812 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2813 if (ndomain != domain)
2819 static bool device_has_rmrr(struct device *dev)
2821 struct dmar_rmrr_unit *rmrr;
2826 for_each_rmrr_units(rmrr) {
2828 * Return TRUE if this RMRR contains the device that is passed in.
2831 for_each_active_dev_scope(rmrr->devices,
2832 rmrr->devices_cnt, i, tmp)
2843 * There are a couple cases where we need to restrict the functionality of
2844 * devices associated with RMRRs. The first is when evaluating a device for
2845 * identity mapping because problems exist when devices are moved in and out
2846 * of domains and their respective RMRR information is lost. This means that
2847 * a device with associated RMRRs will never be in a "passthrough" domain.
2848 * The second is use of the device through the IOMMU API. This interface
2849 * expects to have full control of the IOVA space for the device. We cannot
2850 * satisfy both the requirement that RMRR access is maintained and have an
2851 * unencumbered IOVA space. We also have no ability to quiesce the device's
2852 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2853 * We therefore prevent devices associated with an RMRR from participating in
2854 * the IOMMU API, which eliminates them from device assignment.
2856 * In both cases we assume that PCI USB devices with RMRRs have them largely
2857 * for historical reasons and that the RMRR space is not actively used post
2858 * boot. This exclusion may change if vendors begin to abuse it.
2860 * The same exception is made for graphics devices, with the requirement that
2861 * any use of the RMRR regions will be torn down before assigning the device
2864 static bool device_is_rmrr_locked(struct device *dev)
2866 if (!device_has_rmrr(dev))
2869 if (dev_is_pci(dev)) {
2870 struct pci_dev *pdev = to_pci_dev(dev);
2872 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2879 static int iommu_should_identity_map(struct device *dev, int startup)
2882 if (dev_is_pci(dev)) {
2883 struct pci_dev *pdev = to_pci_dev(dev);
2885 if (device_is_rmrr_locked(dev))
2888 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2891 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2894 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2898 * We want to start off with all devices in the 1:1 domain, and
2899 * take them out later if we find they can't access all of memory.
2901 * However, we can't do this for PCI devices behind bridges,
2902 * because all PCI devices behind the same bridge will end up
2903 * with the same source-id on their transactions.
2905 * Practically speaking, we can't change things around for these
2906 * devices at run-time, because we can't be sure there'll be no
2907 * DMA transactions in flight for any of their siblings.
2909 * So PCI devices (unless they're on the root bus) as well as
2910 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2911 * the 1:1 domain, just in _case_ one of their siblings turns out
2912 * not to be able to map all of memory.
2914 if (!pci_is_pcie(pdev)) {
2915 if (!pci_is_root_bus(pdev->bus))
2917 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2919 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2922 if (device_has_rmrr(dev))
2927 * At boot time, we don't yet know if devices will be 64-bit capable.
2928 * Assume that they will — if they turn out not to be, then we can
2929 * take them out of the 1:1 domain later.
2933 * If the device's dma_mask is less than the system's memory
2934 * size then this is not a candidate for identity mapping.
2936 u64 dma_mask = *dev->dma_mask;
2938 if (dev->coherent_dma_mask &&
2939 dev->coherent_dma_mask < dma_mask)
2940 dma_mask = dev->coherent_dma_mask;
2942 return dma_mask >= dma_get_required_mask(dev);
2948 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2952 if (!iommu_should_identity_map(dev, 1))
2955 ret = domain_add_dev_info(si_domain, dev);
2957 pr_info("%s identity mapping for device %s\n",
2958 hw ? "Hardware" : "Software", dev_name(dev));
2959 else if (ret == -ENODEV)
2960 /* device not associated with an iommu */
2967 static int __init iommu_prepare_static_identity_mapping(int hw)
2969 struct pci_dev *pdev = NULL;
2970 struct dmar_drhd_unit *drhd;
2971 struct intel_iommu *iommu;
2976 for_each_pci_dev(pdev) {
2977 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2982 for_each_active_iommu(iommu, drhd)
2983 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2984 struct acpi_device_physical_node *pn;
2985 struct acpi_device *adev;
2987 if (dev->bus != &acpi_bus_type)
2990 adev = to_acpi_device(dev);
2991 mutex_lock(&adev->physical_node_lock);
2992 list_for_each_entry(pn, &adev->physical_node_list, node) {
2993 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2997 mutex_unlock(&adev->physical_node_lock);
3005 static void intel_iommu_init_qi(struct intel_iommu *iommu)
3008 * Start from a sane iommu hardware state.
3009 * If the queued invalidation was already initialized by us
3010 * (for example, while enabling interrupt-remapping), then
3011 * things are already rolling from a sane state.
3015 * Clear any previous faults.
3017 dmar_fault(-1, iommu);
3019 * Disable queued invalidation if supported and already enabled
3020 * before OS handover.
3022 dmar_disable_qi(iommu);
3025 if (dmar_enable_qi(iommu)) {
3027 * Queued Invalidate not enabled, use Register Based Invalidate
3029 iommu->flush.flush_context = __iommu_flush_context;
3030 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3031 pr_info("%s: Using Register based invalidation\n",
3034 iommu->flush.flush_context = qi_flush_context;
3035 iommu->flush.flush_iotlb = qi_flush_iotlb;
3036 pr_info("%s: Using Queued invalidation\n", iommu->name);
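/*
 * Illustrative sketch (hypothetical helper, not in the driver): once
 * intel_iommu_init_qi() has installed the flush callbacks, a global
 * context-cache and IOTLB invalidation looks the same whether queued or
 * register-based invalidation was selected, e.g.:
 */
static void __maybe_unused example_global_flush(struct intel_iommu *iommu)
{
	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}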
3040 static int copy_context_table(struct intel_iommu *iommu,
3041 struct root_entry *old_re,
3042 struct context_entry **tbl,
3045 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
3046 struct context_entry *new_ce = NULL, ce;
3047 struct context_entry *old_ce = NULL;
3048 struct root_entry re;
3049 phys_addr_t old_ce_phys;
3051 tbl_idx = ext ? bus * 2 : bus;
3052 memcpy(&re, old_re, sizeof(re));
3054 for (devfn = 0; devfn < 256; devfn++) {
3055 /* First calculate the correct index */
3056 idx = (ext ? devfn * 2 : devfn) % 256;
3059 /* First save what we may have and clean up */
3061 tbl[tbl_idx] = new_ce;
3062 __iommu_flush_cache(iommu, new_ce,
3072 old_ce_phys = root_entry_lctp(&re);
3074 old_ce_phys = root_entry_uctp(&re);
3077 if (ext && devfn == 0) {
3078 /* No LCTP, try UCTP */
3087 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3092 new_ce = alloc_pgtable_page(iommu->node);
3099 /* Now copy the context entry */
3100 memcpy(&ce, old_ce + idx, sizeof(ce));
3102 if (!__context_present(&ce))
3105 did = context_domain_id(&ce);
3106 if (did >= 0 && did < cap_ndoms(iommu->cap))
3107 set_bit(did, iommu->domain_ids);
3110 * We need a marker for copied context entries. This
3111 * marker needs to work for the old format as well as
3112 * for extended context entries.
3114 * Bit 67 of the context entry is used. In the old
3115 * format this bit is available to software, in the
3116 * extended format it is the PGE bit, but PGE is ignored
3117 * by HW if PASIDs are disabled (and thus still available).
3120 * So disable PASIDs first and then mark the entry
3121 * copied. This means that we don't copy PASID
3122 * translations from the old kernel, but this is fine as
3123 * faults there are not fatal.
3125 context_clear_pasid_enable(&ce);
3126 context_set_copied(&ce);
3131 tbl[tbl_idx + pos] = new_ce;
3133 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3142 static int copy_translation_tables(struct intel_iommu *iommu)
3144 struct context_entry **ctxt_tbls;
3145 struct root_entry *old_rt;
3146 phys_addr_t old_rt_phys;
3147 int ctxt_table_entries;
3148 unsigned long flags;
3153 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3154 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3155 new_ext = !!ecap_ecs(iommu->ecap);
3158 * The RTT bit can only be changed when translation is disabled,
3159 * but disabling translation means to open a window for data
3160 * corruption. So bail out and don't copy anything if we would
3161 * have to change the bit.
3166 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3170 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3174 /* This is too big for the stack - allocate it from slab */
3175 ctxt_table_entries = ext ? 512 : 256;
3177 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3181 for (bus = 0; bus < 256; bus++) {
3182 ret = copy_context_table(iommu, &old_rt[bus],
3183 ctxt_tbls, bus, ext);
3185 pr_err("%s: Failed to copy context table for bus %d\n",
3191 spin_lock_irqsave(&iommu->lock, flags);
3193 /* Context tables are copied, now write them to the root_entry table */
3194 for (bus = 0; bus < 256; bus++) {
3195 int idx = ext ? bus * 2 : bus;
3198 if (ctxt_tbls[idx]) {
3199 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3200 iommu->root_entry[bus].lo = val;
3203 if (!ext || !ctxt_tbls[idx + 1])
3206 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3207 iommu->root_entry[bus].hi = val;
3210 spin_unlock_irqrestore(&iommu->lock, flags);
3214 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3224 static int __init init_dmars(void)
3226 struct dmar_drhd_unit *drhd;
3227 struct dmar_rmrr_unit *rmrr;
3228 bool copied_tables = false;
3230 struct intel_iommu *iommu;
3236 * initialize and program root entry to not present
3239 for_each_drhd_unit(drhd) {
3241 * lock not needed as this is only incremented in the single
3242 * threaded kernel __init code path; all other accesses are read-only.
3245 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3249 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3252 /* Preallocate enough resources for IOMMU hot-addition */
3253 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3254 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3256 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3259 pr_err("Allocating global iommu array failed\n");
3264 for_each_active_iommu(iommu, drhd) {
3265 g_iommus[iommu->seq_id] = iommu;
3267 intel_iommu_init_qi(iommu);
3269 ret = iommu_init_domains(iommu);
3273 init_translation_status(iommu);
3275 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3276 iommu_disable_translation(iommu);
3277 clear_translation_pre_enabled(iommu);
3278 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3284 * we could share the same root & context tables
3285 * among all IOMMUs; need to split it later.
3287 ret = iommu_alloc_root_entry(iommu);
3291 if (translation_pre_enabled(iommu)) {
3292 pr_info("Translation already enabled - trying to copy translation structures\n");
3294 ret = copy_translation_tables(iommu);
3297 * We found the IOMMU with translation
3298 * enabled - but failed to copy over the
3299 * old root-entry table. Try to proceed
3300 * by disabling translation now and
3301 * allocating a clean root-entry table.
3302 * This might cause DMAR faults, but
3303 * probably the dump will still succeed.
3305 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3307 iommu_disable_translation(iommu);
3308 clear_translation_pre_enabled(iommu);
3310 pr_info("Copied translation tables from previous kernel for %s\n",
3312 copied_tables = true;
3316 if (!ecap_pass_through(iommu->ecap))
3317 hw_pass_through = 0;
3318 #ifdef CONFIG_INTEL_IOMMU_SVM
3319 if (pasid_enabled(iommu))
3320 intel_svm_alloc_pasid_tables(iommu);
3325 * Now that qi is enabled on all iommus, set the root entry and flush
3326 * caches. This is required on some Intel X58 chipsets, otherwise the
3327 * flush_context function will loop forever and the boot hangs.
3329 for_each_active_iommu(iommu, drhd) {
3330 iommu_flush_write_buffer(iommu);
3331 iommu_set_root_entry(iommu);
3332 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3333 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3336 if (iommu_pass_through)
3337 iommu_identity_mapping |= IDENTMAP_ALL;
3339 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3340 iommu_identity_mapping |= IDENTMAP_GFX;
3343 check_tylersburg_isoch();
3345 if (iommu_identity_mapping) {
3346 ret = si_domain_init(hw_pass_through);
3353 * If we copied translations from a previous kernel in the kdump
3354 * case, we can not assign the devices to domains now, as that
3355 * would eliminate the old mappings. So skip this part and defer
3356 * the assignment to device driver initialization time.
3362 * If pass through is not set or not enabled, set up context entries for
3363 * identity mappings for rmrr, gfx, and isa, and fall back to static
3364 * identity mapping if iommu_identity_mapping is set.
3366 if (iommu_identity_mapping) {
3367 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3369 pr_crit("Failed to setup IOMMU pass-through\n");
3375 * for each dev attached to rmrr
3377 * locate drhd for dev, alloc domain for dev
3378 * allocate free domain
3379 * allocate page table entries for rmrr
3380 * if context not allocated for bus
3381 * allocate and init context
3382 * set present in root table for this bus
3383 * init context with domain, translation etc
3387 pr_info("Setting RMRR:\n");
3388 for_each_rmrr_units(rmrr) {
3389 /* some BIOSes list non-existent devices in the DMAR table. */
3390 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3392 ret = iommu_prepare_rmrr_dev(rmrr, dev);
3394 pr_err("Mapping reserved region failed\n");
3398 iommu_prepare_isa();
3405 * global invalidate context cache
3406 * global invalidate iotlb
3407 * enable translation
3409 for_each_iommu(iommu, drhd) {
3410 if (drhd->ignored) {
3412 * we always have to disable PMRs or DMA may fail on
3416 iommu_disable_protect_mem_regions(iommu);
3420 iommu_flush_write_buffer(iommu);
3422 #ifdef CONFIG_INTEL_IOMMU_SVM
3423 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3424 ret = intel_svm_enable_prq(iommu);
3429 ret = dmar_set_interrupt(iommu);
3433 if (!translation_pre_enabled(iommu))
3434 iommu_enable_translation(iommu);
3436 iommu_disable_protect_mem_regions(iommu);
3442 for_each_active_iommu(iommu, drhd) {
3443 disable_dmar_iommu(iommu);
3444 free_dmar_iommu(iommu);
3453 /* This takes a number of _MM_ pages, not VTD pages */
3454 static unsigned long intel_alloc_iova(struct device *dev,
3455 struct dmar_domain *domain,
3456 unsigned long nrpages, uint64_t dma_mask)
3458 unsigned long iova_pfn = 0;
3460 /* Restrict dma_mask to the width that the iommu can handle */
3461 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3462 /* Ensure we reserve the whole size-aligned region */
3463 nrpages = __roundup_pow_of_two(nrpages);
3465 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3467 * First try to allocate an io virtual address in
3468 * DMA_BIT_MASK(32) and if that fails then try allocating
3471 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3472 IOVA_PFN(DMA_BIT_MASK(32)), false);
3476 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3477 IOVA_PFN(dma_mask), true);
3478 if (unlikely(!iova_pfn)) {
3479 pr_err("Allocating %ld-page iova for %s failed\n",
3480 nrpages, dev_name(dev));
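/*
 * Illustrative sketch (hypothetical helper, not in the driver): an
 * alloc/free round trip through intel_alloc_iova() above. A power-of-two
 * page count is used so that the internal power-of-two rounding is a
 * no-op and the same count can be passed to free_iova_fast().
 */
static void __maybe_unused example_iova_roundtrip(struct device *dev,
						  struct dmar_domain *domain)
{
	unsigned long iova_pfn;

	iova_pfn = intel_alloc_iova(dev, domain, 8, DMA_BIT_MASK(32));
	if (!iova_pfn)
		return;

	free_iova_fast(&domain->iovad, iova_pfn, 8);
}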
3487 static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3489 struct dmar_domain *domain, *tmp;
3490 struct dmar_rmrr_unit *rmrr;
3491 struct device *i_dev;
3494 domain = find_domain(dev);
3498 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3502 /* We have a new domain - setup possible RMRRs for the device */
3504 for_each_rmrr_units(rmrr) {
3505 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3510 ret = domain_prepare_identity_map(dev, domain,
3514 dev_err(dev, "Mapping reserved region failed\n");
3519 tmp = set_domain_for_dev(dev, domain);
3520 if (!tmp || domain != tmp) {
3521 domain_exit(domain);
3528 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3534 /* Check if the dev needs to go through the non-identity map and unmap process. */
3535 static int iommu_no_mapping(struct device *dev)
3539 if (iommu_dummy(dev))
3542 if (!iommu_identity_mapping)
3545 found = identity_mapping(dev);
3547 if (iommu_should_identity_map(dev, 0))
3551 * 32 bit DMA is removed from si_domain and falls back
3552 * to non-identity mapping.
3554 dmar_remove_one_dev_info(si_domain, dev);
3555 pr_info("32bit %s uses non-identity mapping\n",
3561 * If a 64 bit DMA device is detached from a VM, the device
3562 * is put into si_domain for identity mapping.
3564 if (iommu_should_identity_map(dev, 0)) {
3566 ret = domain_add_dev_info(si_domain, dev);
3568 pr_info("64bit %s uses identity mapping\n",
3578 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3579 size_t size, int dir, u64 dma_mask)
3581 struct dmar_domain *domain;
3582 phys_addr_t start_paddr;
3583 unsigned long iova_pfn;
3586 struct intel_iommu *iommu;
3587 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3589 BUG_ON(dir == DMA_NONE);
3591 if (iommu_no_mapping(dev))
3594 domain = get_valid_domain_for_dev(dev);
3598 iommu = domain_get_iommu(domain);
3599 size = aligned_nrpages(paddr, size);
3601 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3606 * Check if DMAR supports zero-length reads on write-only mappings.
3609 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3610 !cap_zlr(iommu->cap))
3611 prot |= DMA_PTE_READ;
3612 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3613 prot |= DMA_PTE_WRITE;
3615 * paddr - (paddr + size) might span a partial page; we should map the
3616 * whole page. Note: if two parts of one page are separately mapped, we
3617 * might have two guest_addr mappings to the same host paddr, but this
3618 * is not a big problem
3620 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3621 mm_to_dma_pfn(paddr_pfn), size, prot);
3625 /* it's a non-present to present mapping. Only flush if caching mode */
3626 if (cap_caching_mode(iommu->cap))
3627 iommu_flush_iotlb_psi(iommu, domain,
3628 mm_to_dma_pfn(iova_pfn),
3631 iommu_flush_write_buffer(iommu);
3633 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3634 start_paddr += paddr & ~PAGE_MASK;
3639 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3640 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3641 dev_name(dev), size, (unsigned long long)paddr, dir);
3645 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3646 unsigned long offset, size_t size,
3647 enum dma_data_direction dir,
3648 unsigned long attrs)
3650 return __intel_map_single(dev, page_to_phys(page) + offset, size,
3651 dir, *dev->dma_mask);
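/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * driver-side view. intel_map_page() above is reached through the generic
 * DMA API, so a device driver would do something like:
 */
static dma_addr_t __maybe_unused example_dma_map(struct device *dev,
						 struct page *page,
						 size_t len)
{
	dma_addr_t handle;

	handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return 0;	/* a real caller would propagate the error */

	return handle;
}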
3654 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3656 struct dmar_domain *domain;
3657 unsigned long start_pfn, last_pfn;
3658 unsigned long nrpages;
3659 unsigned long iova_pfn;
3660 struct intel_iommu *iommu;
3661 struct page *freelist;
3663 if (iommu_no_mapping(dev))
3666 domain = find_domain(dev);
3669 iommu = domain_get_iommu(domain);
3671 iova_pfn = IOVA_PFN(dev_addr);
3673 nrpages = aligned_nrpages(dev_addr, size);
3674 start_pfn = mm_to_dma_pfn(iova_pfn);
3675 last_pfn = start_pfn + nrpages - 1;
3677 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3678 dev_name(dev), start_pfn, last_pfn);
3680 freelist = domain_unmap(domain, start_pfn, last_pfn);
3682 if (intel_iommu_strict) {
3683 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3684 nrpages, !freelist, 0);
3686 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3687 dma_free_pagelist(freelist);
3689 queue_iova(&domain->iovad, iova_pfn, nrpages,
3690 (unsigned long)freelist);
3692 * queue up the release of the unmap to save the 1/6th of the
3693 * cpu used up by the iotlb flush operation...
3698 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3699 size_t size, enum dma_data_direction dir,
3700 unsigned long attrs)
3702 intel_unmap(dev, dev_addr, size);
3705 static void *intel_alloc_coherent(struct device *dev, size_t size,
3706 dma_addr_t *dma_handle, gfp_t flags,
3707 unsigned long attrs)
3709 struct page *page = NULL;
3712 size = PAGE_ALIGN(size);
3713 order = get_order(size);
3715 if (!iommu_no_mapping(dev))
3716 flags &= ~(GFP_DMA | GFP_DMA32);
3717 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3718 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3724 if (gfpflags_allow_blocking(flags)) {
3725 unsigned int count = size >> PAGE_SHIFT;
3727 page = dma_alloc_from_contiguous(dev, count, order, flags);
3728 if (page && iommu_no_mapping(dev) &&
3729 page_to_phys(page) + size > dev->coherent_dma_mask) {
3730 dma_release_from_contiguous(dev, page, count);
3736 page = alloc_pages(flags, order);
3739 memset(page_address(page), 0, size);
3741 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3743 dev->coherent_dma_mask);
3745 return page_address(page);
3746 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3747 __free_pages(page, order);
3752 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3753 dma_addr_t dma_handle, unsigned long attrs)
3756 struct page *page = virt_to_page(vaddr);
3758 size = PAGE_ALIGN(size);
3759 order = get_order(size);
3761 intel_unmap(dev, dma_handle, size);
3762 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3763 __free_pages(page, order);
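/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * coherent pair above backs dma_alloc_coherent()/dma_free_coherent() for
 * devices translated by VT-d. The one-page size is made up.
 */
static void __maybe_unused example_coherent_roundtrip(struct device *dev)
{
	dma_addr_t handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return;

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, handle);
}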
3766 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3767 int nelems, enum dma_data_direction dir,
3768 unsigned long attrs)
3770 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3771 unsigned long nrpages = 0;
3772 struct scatterlist *sg;
3775 for_each_sg(sglist, sg, nelems, i) {
3776 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3779 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3782 static int intel_nontranslate_map_sg(struct device *hddev,
3783 struct scatterlist *sglist, int nelems, int dir)
3786 struct scatterlist *sg;
3788 for_each_sg(sglist, sg, nelems, i) {
3789 BUG_ON(!sg_page(sg));
3790 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3791 sg->dma_length = sg->length;
3796 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3797 enum dma_data_direction dir, unsigned long attrs)
3800 struct dmar_domain *domain;
3803 unsigned long iova_pfn;
3805 struct scatterlist *sg;
3806 unsigned long start_vpfn;
3807 struct intel_iommu *iommu;
3809 BUG_ON(dir == DMA_NONE);
3810 if (iommu_no_mapping(dev))
3811 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3813 domain = get_valid_domain_for_dev(dev);
3817 iommu = domain_get_iommu(domain);
3819 for_each_sg(sglist, sg, nelems, i)
3820 size += aligned_nrpages(sg->offset, sg->length);
3822 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3825 sglist->dma_length = 0;
3830 * Check if DMAR supports zero-length reads on write-only mappings.
3833 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3834 !cap_zlr(iommu->cap))
3835 prot |= DMA_PTE_READ;
3836 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3837 prot |= DMA_PTE_WRITE;
3839 start_vpfn = mm_to_dma_pfn(iova_pfn);
3841 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3842 if (unlikely(ret)) {
3843 dma_pte_free_pagetable(domain, start_vpfn,
3844 start_vpfn + size - 1,
3845 agaw_to_level(domain->agaw) + 1);
3846 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3850 /* it's a non-present to present mapping. Only flush if caching mode */
3851 if (cap_caching_mode(iommu->cap))
3852 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3854 iommu_flush_write_buffer(iommu);
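/*
 * Illustrative sketch (hypothetical helper, not in the driver): mapping a
 * two-entry scatterlist through the generic DMA API, which lands in
 * intel_map_sg() above for devices using intel_dma_ops. The pages are
 * assumed to be valid, caller-provided pages.
 */
static int __maybe_unused example_sg_map(struct device *dev,
					 struct page *p0, struct page *p1)
{
	struct scatterlist sgl[2];
	int nents;

	sg_init_table(sgl, 2);
	sg_set_page(&sgl[0], p0, PAGE_SIZE, 0);
	sg_set_page(&sgl[1], p1, PAGE_SIZE, 0);

	nents = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;

	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
	return 0;
}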
3859 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3864 const struct dma_map_ops intel_dma_ops = {
3865 .alloc = intel_alloc_coherent,
3866 .free = intel_free_coherent,
3867 .map_sg = intel_map_sg,
3868 .unmap_sg = intel_unmap_sg,
3869 .map_page = intel_map_page,
3870 .unmap_page = intel_unmap_page,
3871 .mapping_error = intel_mapping_error,
3873 .dma_supported = x86_dma_supported,
3877 static inline int iommu_domain_cache_init(void)
3881 iommu_domain_cache = kmem_cache_create("iommu_domain",
3882 sizeof(struct dmar_domain),
3887 if (!iommu_domain_cache) {
3888 pr_err("Couldn't create iommu_domain cache\n");
3895 static inline int iommu_devinfo_cache_init(void)
3899 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3900 sizeof(struct device_domain_info),
3904 if (!iommu_devinfo_cache) {
3905 pr_err("Couldn't create devinfo cache\n");
3912 static int __init iommu_init_mempool(void)
3915 ret = iova_cache_get();
3919 ret = iommu_domain_cache_init();
3923 ret = iommu_devinfo_cache_init();
3927 kmem_cache_destroy(iommu_domain_cache);
3934 static void __init iommu_exit_mempool(void)
3936 kmem_cache_destroy(iommu_devinfo_cache);
3937 kmem_cache_destroy(iommu_domain_cache);
3941 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3943 struct dmar_drhd_unit *drhd;
3947 /* We know that this device on this chipset has its own IOMMU.
3948 * If we find it under a different IOMMU, then the BIOS is lying
3949 * to us. Hope that the IOMMU for this device is actually
3950 * disabled, and it needs no translation...
3952 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3954 /* "can't" happen */
3955 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3958 vtbar &= 0xffff0000;
3960 /* we know that this iommu should be at offset 0xa000 from vtbar */
3961 drhd = dmar_find_matched_drhd_unit(pdev);
3962 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3963 TAINT_FIRMWARE_WORKAROUND,
3964 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3965 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3967 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3969 static void __init init_no_remapping_devices(void)
3971 struct dmar_drhd_unit *drhd;
3975 for_each_drhd_unit(drhd) {
3976 if (!drhd->include_all) {
3977 for_each_active_dev_scope(drhd->devices,
3978 drhd->devices_cnt, i, dev)
3980 /* ignore DMAR unit if no devices exist */
3981 if (i == drhd->devices_cnt)
3986 for_each_active_drhd_unit(drhd) {
3987 if (drhd->include_all)
3990 for_each_active_dev_scope(drhd->devices,
3991 drhd->devices_cnt, i, dev)
3992 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3994 if (i < drhd->devices_cnt)
3997 /* This IOMMU has *only* gfx devices. Either bypass it or
3998 set the gfx_mapped flag, as appropriate */
4000 intel_iommu_gfx_mapped = 1;
4003 for_each_active_dev_scope(drhd->devices,
4004 drhd->devices_cnt, i, dev)
4005 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4010 #ifdef CONFIG_SUSPEND
4011 static int init_iommu_hw(void)
4013 struct dmar_drhd_unit *drhd;
4014 struct intel_iommu *iommu = NULL;
4016 for_each_active_iommu(iommu, drhd)
4018 dmar_reenable_qi(iommu);
4020 for_each_iommu(iommu, drhd) {
4021 if (drhd->ignored) {
4023 * we always have to disable PMRs or DMA may fail on
4027 iommu_disable_protect_mem_regions(iommu);
4031 iommu_flush_write_buffer(iommu);
4033 iommu_set_root_entry(iommu);
4035 iommu->flush.flush_context(iommu, 0, 0, 0,
4036 DMA_CCMD_GLOBAL_INVL);
4037 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4038 iommu_enable_translation(iommu);
4039 iommu_disable_protect_mem_regions(iommu);
4045 static void iommu_flush_all(void)
4047 struct dmar_drhd_unit *drhd;
4048 struct intel_iommu *iommu;
4050 for_each_active_iommu(iommu, drhd) {
4051 iommu->flush.flush_context(iommu, 0, 0, 0,
4052 DMA_CCMD_GLOBAL_INVL);
4053 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4054 DMA_TLB_GLOBAL_FLUSH);
4058 static int iommu_suspend(void)
4060 struct dmar_drhd_unit *drhd;
4061 struct intel_iommu *iommu = NULL;
4064 for_each_active_iommu(iommu, drhd) {
4065 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4067 if (!iommu->iommu_state)
4073 for_each_active_iommu(iommu, drhd) {
4074 iommu_disable_translation(iommu);
4076 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4078 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4079 readl(iommu->reg + DMAR_FECTL_REG);
4080 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4081 readl(iommu->reg + DMAR_FEDATA_REG);
4082 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4083 readl(iommu->reg + DMAR_FEADDR_REG);
4084 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4085 readl(iommu->reg + DMAR_FEUADDR_REG);
4087 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4092 for_each_active_iommu(iommu, drhd)
4093 kfree(iommu->iommu_state);
4098 static void iommu_resume(void)
4100 struct dmar_drhd_unit *drhd;
4101 struct intel_iommu *iommu = NULL;
4104 if (init_iommu_hw()) {
4106 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4108 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4112 for_each_active_iommu(iommu, drhd) {
4114 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4116 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4117 iommu->reg + DMAR_FECTL_REG);
4118 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4119 iommu->reg + DMAR_FEDATA_REG);
4120 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4121 iommu->reg + DMAR_FEADDR_REG);
4122 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4123 iommu->reg + DMAR_FEUADDR_REG);
4125 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4128 for_each_active_iommu(iommu, drhd)
4129 kfree(iommu->iommu_state);
4132 static struct syscore_ops iommu_syscore_ops = {
4133 .resume = iommu_resume,
4134 .suspend = iommu_suspend,
4137 static void __init init_iommu_pm_ops(void)
4139 register_syscore_ops(&iommu_syscore_ops);
4143 static inline void init_iommu_pm_ops(void) {}
4144 #endif /* CONFIG_PM */
4147 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4149 struct acpi_dmar_reserved_memory *rmrr;
4150 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
4151 struct dmar_rmrr_unit *rmrru;
4154 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4158 rmrru->hdr = header;
4159 rmrr = (struct acpi_dmar_reserved_memory *)header;
4160 rmrru->base_address = rmrr->base_address;
4161 rmrru->end_address = rmrr->end_address;
4163 length = rmrr->end_address - rmrr->base_address + 1;
4164 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4169 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4170 ((void *)rmrr) + rmrr->header.length,
4171 &rmrru->devices_cnt);
4172 if (rmrru->devices_cnt && rmrru->devices == NULL)
4175 list_add(&rmrru->list, &dmar_rmrr_units);
4186 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4188 struct dmar_atsr_unit *atsru;
4189 struct acpi_dmar_atsr *tmp;
4191 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4192 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4193 if (atsr->segment != tmp->segment)
4195 if (atsr->header.length != tmp->header.length)
4197 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4204 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4206 struct acpi_dmar_atsr *atsr;
4207 struct dmar_atsr_unit *atsru;
4209 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
4212 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4213 atsru = dmar_find_atsr(atsr);
4217 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4222 * If memory is allocated from slab by ACPI _DSM method, we need to
4223 * copy the memory content because the memory buffer will be freed on exit.
4226 atsru->hdr = (void *)(atsru + 1);
4227 memcpy(atsru->hdr, hdr, hdr->length);
4228 atsru->include_all = atsr->flags & 0x1;
4229 if (!atsru->include_all) {
4230 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4231 (void *)atsr + atsr->header.length,
4232 &atsru->devices_cnt);
4233 if (atsru->devices_cnt && atsru->devices == NULL) {
4239 list_add_rcu(&atsru->list, &dmar_atsr_units);
4244 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4246 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4250 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4252 struct acpi_dmar_atsr *atsr;
4253 struct dmar_atsr_unit *atsru;
4255 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4256 atsru = dmar_find_atsr(atsr);
4258 list_del_rcu(&atsru->list);
4260 intel_iommu_free_atsr(atsru);
4266 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4270 struct acpi_dmar_atsr *atsr;
4271 struct dmar_atsr_unit *atsru;
4273 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4274 atsru = dmar_find_atsr(atsr);
4278 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4279 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4287 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4290 struct intel_iommu *iommu = dmaru->iommu;
4292 if (g_iommus[iommu->seq_id])
4295 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4296 pr_warn("%s: Doesn't support hardware pass through.\n",
4300 if (!ecap_sc_support(iommu->ecap) &&
4301 domain_update_iommu_snooping(iommu)) {
4302 pr_warn("%s: Doesn't support snooping.\n",
4306 sp = domain_update_iommu_superpage(iommu) - 1;
4307 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4308 pr_warn("%s: Doesn't support large page.\n",
4314 * Disable translation if already enabled prior to OS handover.
4316 if (iommu->gcmd & DMA_GCMD_TE)
4317 iommu_disable_translation(iommu);
4319 g_iommus[iommu->seq_id] = iommu;
4320 ret = iommu_init_domains(iommu);
4322 ret = iommu_alloc_root_entry(iommu);
4326 #ifdef CONFIG_INTEL_IOMMU_SVM
4327 if (pasid_enabled(iommu))
4328 intel_svm_alloc_pasid_tables(iommu);
4331 if (dmaru->ignored) {
4333 * we always have to disable PMRs or DMA may fail on this device
4336 iommu_disable_protect_mem_regions(iommu);
4340 intel_iommu_init_qi(iommu);
4341 iommu_flush_write_buffer(iommu);
4343 #ifdef CONFIG_INTEL_IOMMU_SVM
4344 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4345 ret = intel_svm_enable_prq(iommu);
4350 ret = dmar_set_interrupt(iommu);
4354 iommu_set_root_entry(iommu);
4355 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4356 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4357 iommu_enable_translation(iommu);
4359 iommu_disable_protect_mem_regions(iommu);
4363 disable_dmar_iommu(iommu);
4365 free_dmar_iommu(iommu);
4369 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4372 struct intel_iommu *iommu = dmaru->iommu;
4374 if (!intel_iommu_enabled)
4380 ret = intel_iommu_add(dmaru);
4382 disable_dmar_iommu(iommu);
4383 free_dmar_iommu(iommu);
4389 static void intel_iommu_free_dmars(void)
4391 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4392 struct dmar_atsr_unit *atsru, *atsr_n;
4394 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4395 list_del(&rmrru->list);
4396 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4401 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4402 list_del(&atsru->list);
4403 intel_iommu_free_atsr(atsru);
4407 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4410 struct pci_bus *bus;
4411 struct pci_dev *bridge = NULL;
4413 struct acpi_dmar_atsr *atsr;
4414 struct dmar_atsr_unit *atsru;
4416 dev = pci_physfn(dev);
4417 for (bus = dev->bus; bus; bus = bus->parent) {
4419 /* If it's an integrated device, allow ATS */
4422 /* Connected via non-PCIe: no ATS */
4423 if (!pci_is_pcie(bridge) ||
4424 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4426 /* If we found the root port, look it up in the ATSR */
4427 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4432 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4433 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4434 if (atsr->segment != pci_domain_nr(dev->bus))
4437 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4438 if (tmp == &bridge->dev)
4441 if (atsru->include_all)
4451 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4454 struct dmar_rmrr_unit *rmrru;
4455 struct dmar_atsr_unit *atsru;
4456 struct acpi_dmar_atsr *atsr;
4457 struct acpi_dmar_reserved_memory *rmrr;
4459 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4462 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4463 rmrr = container_of(rmrru->hdr,
4464 struct acpi_dmar_reserved_memory, header);
4465 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4466 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4467 ((void *)rmrr) + rmrr->header.length,
4468 rmrr->segment, rmrru->devices,
4469 rmrru->devices_cnt);
4472 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4473 dmar_remove_dev_scope(info, rmrr->segment,
4474 rmrru->devices, rmrru->devices_cnt);
4478 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4479 if (atsru->include_all)
4482 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4483 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4484 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4485 (void *)atsr + atsr->header.length,
4486 atsr->segment, atsru->devices,
4487 atsru->devices_cnt);
4492 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4493 if (dmar_remove_dev_scope(info, atsr->segment,
4494 atsru->devices, atsru->devices_cnt))
4503 * Here we only respond to a device being unbound from its driver.
4505 * A newly added device is not attached to its DMAR domain here yet;
4506 * that will happen when the device is first mapped to an iova.
4508 static int device_notifier(struct notifier_block *nb,
4509 unsigned long action, void *data)
4511 struct device *dev = data;
4512 struct dmar_domain *domain;
4514 if (iommu_dummy(dev))
4517 if (action != BUS_NOTIFY_REMOVED_DEVICE)
4520 domain = find_domain(dev);
4524 dmar_remove_one_dev_info(domain, dev);
4525 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4526 domain_exit(domain);
4531 static struct notifier_block device_nb = {
4532 .notifier_call = device_notifier,
4535 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4536 unsigned long val, void *v)
4538 struct memory_notify *mhp = v;
4539 unsigned long long start, end;
4540 unsigned long start_vpfn, last_vpfn;
4543 case MEM_GOING_ONLINE:
4544 start = mhp->start_pfn << PAGE_SHIFT;
4545 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4546 if (iommu_domain_identity_map(si_domain, start, end)) {
4547 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4554 case MEM_CANCEL_ONLINE:
4555 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4556 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4557 while (start_vpfn <= last_vpfn) {
4559 struct dmar_drhd_unit *drhd;
4560 struct intel_iommu *iommu;
4561 struct page *freelist;
4563 iova = find_iova(&si_domain->iovad, start_vpfn);
4565 pr_debug("Failed to get IOVA for PFN %lx\n",
4570 iova = split_and_remove_iova(&si_domain->iovad, iova,
4571 start_vpfn, last_vpfn);
4573 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4574 start_vpfn, last_vpfn);
4578 freelist = domain_unmap(si_domain, iova->pfn_lo,
4582 for_each_active_iommu(iommu, drhd)
4583 iommu_flush_iotlb_psi(iommu, si_domain,
4584 iova->pfn_lo, iova_size(iova),
4587 dma_free_pagelist(freelist);
4589 start_vpfn = iova->pfn_hi + 1;
4590 free_iova_mem(iova);
4598 static struct notifier_block intel_iommu_memory_nb = {
4599 .notifier_call = intel_iommu_memory_notifier,
4603 static void free_all_cpu_cached_iovas(unsigned int cpu)
4607 for (i = 0; i < g_num_of_iommus; i++) {
4608 struct intel_iommu *iommu = g_iommus[i];
4609 struct dmar_domain *domain;
4615 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4616 domain = get_iommu_domain(iommu, (u16)did);
4620 free_cpu_cached_iovas(cpu, &domain->iovad);
4625 static int intel_iommu_cpu_dead(unsigned int cpu)
4627 free_all_cpu_cached_iovas(cpu);
4631 static void intel_disable_iommus(void)
4633 struct intel_iommu *iommu = NULL;
4634 struct dmar_drhd_unit *drhd;
4636 for_each_iommu(iommu, drhd)
4637 iommu_disable_translation(iommu);
4640 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4642 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4644 return container_of(iommu_dev, struct intel_iommu, iommu);
4647 static ssize_t intel_iommu_show_version(struct device *dev,
4648 struct device_attribute *attr,
4651 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4652 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4653 return sprintf(buf, "%d:%d\n",
4654 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4656 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4658 static ssize_t intel_iommu_show_address(struct device *dev,
4659 struct device_attribute *attr,
4662 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4663 return sprintf(buf, "%llx\n", iommu->reg_phys);
4665 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4667 static ssize_t intel_iommu_show_cap(struct device *dev,
4668 struct device_attribute *attr,
4671 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4672 return sprintf(buf, "%llx\n", iommu->cap);
4674 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4676 static ssize_t intel_iommu_show_ecap(struct device *dev,
4677 struct device_attribute *attr,
4680 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4681 return sprintf(buf, "%llx\n", iommu->ecap);
4683 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4685 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4686 struct device_attribute *attr,
4689 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4690 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4692 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4694 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4695 struct device_attribute *attr,
4698 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4699 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4700 cap_ndoms(iommu->cap)));
4702 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4704 static struct attribute *intel_iommu_attrs[] = {
4705 &dev_attr_version.attr,
4706 &dev_attr_address.attr,
4708 &dev_attr_ecap.attr,
4709 &dev_attr_domains_supported.attr,
4710 &dev_attr_domains_used.attr,
4714 static struct attribute_group intel_iommu_group = {
4715 .name = "intel-iommu",
4716 .attrs = intel_iommu_attrs,
4719 const struct attribute_group *intel_iommu_groups[] = {
4724 int __init intel_iommu_init(void)
4727 struct dmar_drhd_unit *drhd;
4728 struct intel_iommu *iommu;
4730 /* VT-d is required for a TXT/tboot launch, so enforce that */
4731 force_on = tboot_force_iommu();
4733 if (iommu_init_mempool()) {
4735 panic("tboot: Failed to initialize iommu memory\n");
4739 down_write(&dmar_global_lock);
4740 if (dmar_table_init()) {
4742 panic("tboot: Failed to initialize DMAR table\n");
4746 if (dmar_dev_scope_init() < 0) {
4748 panic("tboot: Failed to initialize DMAR device scope\n");
4752 up_write(&dmar_global_lock);
4755 * The bus notifier takes the dmar_global_lock, so lockdep will
4756 * complain later when we register it under the lock.
4758 dmar_register_bus_notifier();
4760 down_write(&dmar_global_lock);
4762 if (no_iommu || dmar_disabled) {
4764 * We exit the function here to ensure IOMMU's remapping and
4765 * mempool aren't set up, which means that the IOMMU's PMRs
4766 * won't be disabled via the call to init_dmars(). So disable
4767 * them explicitly here. The PMRs were set up by tboot prior to
4768 * calling SENTER, but the kernel is expected to reset/tear
4771 if (intel_iommu_tboot_noforce) {
4772 for_each_iommu(iommu, drhd)
4773 iommu_disable_protect_mem_regions(iommu);
4777 * Make sure the IOMMUs are switched off, even when we
4778 * boot into a kexec kernel and the previous kernel left
4781 intel_disable_iommus();
4785 if (list_empty(&dmar_rmrr_units))
4786 pr_info("No RMRR found\n");
4788 if (list_empty(&dmar_atsr_units))
4789 pr_info("No ATSR found\n");
4791 if (dmar_init_reserved_ranges()) {
4793 panic("tboot: Failed to reserve iommu ranges\n");
4794 goto out_free_reserved_range;
4797 init_no_remapping_devices();
4802 panic("tboot: Failed to initialize DMARs\n");
4803 pr_err("Initialization failed\n");
4804 goto out_free_reserved_range;
4806 up_write(&dmar_global_lock);
4807 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4809 #ifdef CONFIG_SWIOTLB
4812 dma_ops = &intel_dma_ops;
4814 init_iommu_pm_ops();
4816 for_each_active_iommu(iommu, drhd) {
4817 iommu_device_sysfs_add(&iommu->iommu, NULL,
4820 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4821 iommu_device_register(&iommu->iommu);
4824 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4825 bus_register_notifier(&pci_bus_type, &device_nb);
4826 if (si_domain && !hw_pass_through)
4827 register_memory_notifier(&intel_iommu_memory_nb);
4828 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4829 intel_iommu_cpu_dead);
4830 intel_iommu_enabled = 1;
4834 out_free_reserved_range:
4835 put_iova_domain(&reserved_iova_list);
4837 intel_iommu_free_dmars();
4838 up_write(&dmar_global_lock);
4839 iommu_exit_mempool();
4843 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4845 struct intel_iommu *iommu = opaque;
4847 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4852 * NB - intel-iommu lacks any sort of reference counting for the users of
4853 * dependent devices. If multiple endpoints have intersecting dependent
4854 * devices, unbinding the driver from any one of them will possibly leave
4855 * the others unable to operate.
4857 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4859 if (!iommu || !dev || !dev_is_pci(dev))
4862 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4865 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4867 struct intel_iommu *iommu;
4868 unsigned long flags;
4870 assert_spin_locked(&device_domain_lock);
4875 iommu = info->iommu;
4878 iommu_disable_dev_iotlb(info);
4879 domain_context_clear(iommu, info->dev);
4882 unlink_domain_info(info);
4884 spin_lock_irqsave(&iommu->lock, flags);
4885 domain_detach_iommu(info->domain, iommu);
4886 spin_unlock_irqrestore(&iommu->lock, flags);
4888 free_devinfo_mem(info);
4891 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4894 struct device_domain_info *info;
4895 unsigned long flags;
4897 spin_lock_irqsave(&device_domain_lock, flags);
4898 info = dev->archdata.iommu;
4899 __dmar_remove_one_dev_info(info);
4900 spin_unlock_irqrestore(&device_domain_lock, flags);
4903 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4907 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
4908 domain_reserve_special_ranges(domain);
4910 /* calculate AGAW */
4911 domain->gaw = guest_width;
4912 adjust_width = guestwidth_to_adjustwidth(guest_width);
4913 domain->agaw = width_to_agaw(adjust_width);
4915 domain->iommu_coherency = 0;
4916 domain->iommu_snooping = 0;
4917 domain->iommu_superpage = 0;
4918 domain->max_addr = 0;
4920 /* always allocate the top pgd */
4921 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4924 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		pr_err("Can't allocate dmar_domain\n");
		return NULL;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		pr_err("Domain initialization failed\n");
		domain_exit(dmar_domain);
		return NULL;
	}
	domain_update_iommu_cap(dmar_domain);

	domain = &dmar_domain->domain;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return domain;
}

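/*
 * Illustrative: with DEFAULT_DOMAIN_ADDRESS_WIDTH = 48, aperture_end is
 * __DOMAIN_MAX_ADDR(48) = (1ULL << 48) - 1 = 0xffffffffffff. Since
 * force_aperture is set, users of the generic IOMMU API (e.g. VFIO)
 * consult this geometry and refuse IOVAs outside [0, aperture_end].
 */
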
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	domain_exit(to_dmar_domain(domain));
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			rcu_read_lock();
			dmar_remove_one_dev_info(old_domain, dev);
			rcu_read_unlock();

			if (!domain_type_is_vm_or_si(old_domain) &&
			    list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev);
}

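/*
 * Illustrative walk-through of the level-trimming loop above: a domain
 * created with the default 48-bit width has agaw = 2 (4 levels). If the
 * attaching IOMMU only supports agaw = 1 (39 bits, 3 levels), one
 * iteration frees the top-level table (only its first entry can be live,
 * given the max_addr check above) and promotes the table it points to,
 * leaving a page table the hardware can actually walk.
 */
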
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

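/*
 * Illustrative numbers for the rounding above: hpa = 0x1234 with
 * size = 0x2000 touches bytes 0x1234-0x3233, i.e. three 4KiB pages, so
 * aligned_nrpages() returns 3 and three PTEs are installed even though
 * size / VTD_PAGE_SIZE alone would suggest two. The prot translation is
 * one-to-one for IOMMU_READ/IOMMU_WRITE; DMA_PTE_SNP is applied only
 * when the hardware actually enforces snooping for this domain.
 */
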
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_domain_iommu(iommu_id, dmar_domain) {
		iommu = g_iommus[iommu_id];

		iommu_flush_iotlb_psi(iommu, dmar_domain,
				      start_pfn, npages, !freelist, 0);
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

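/*
 * Illustrative: if the PTE looked up above sits at level 2 (a 2MiB
 * superpage), level_to_offset_bits(2) = 9, so a 4KiB unmap request is
 * rounded up to VTD_PAGE_SIZE << 9 = 2MiB. A superpage cannot be split
 * here; the returned size tells the IOMMU core how much was really
 * unmapped.
 */
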
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(&iommu->iommu, dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(&iommu->iommu, dev);
}

static void intel_iommu_get_resv_regions(struct device *device,
					 struct list_head *head)
{
	struct iommu_resv_region *reg;
	struct dmar_rmrr_unit *rmrr;
	struct device *i_dev;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
					  i, i_dev) {
			if (i_dev != device)
				continue;

			list_add_tail(&rmrr->resv->list, head);
		}
	}
	rcu_read_unlock();

	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
				      0, IOMMU_RESV_MSI);
	if (!reg)
		return;
	list_add_tail(&reg->list, head);
}

static void intel_iommu_put_resv_regions(struct device *dev,
					 struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list) {
		if (entry->type == IOMMU_RESV_RESERVED)
			kfree(entry);
	}
}

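/*
 * Usage note (illustrative): the regions reported by the callbacks above
 * are what the IOMMU core exposes via
 * /sys/kernel/iommu_groups/<group>/reserved_regions, e.g. RMRR windows
 * as "direct" entries and the IOAPIC window as "msi". The RMRR entries
 * are owned by their dmar_rmrr_unit and deliberately survive the put
 * callback, which only frees IOMMU_RESV_RESERVED entries.
 */
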
#ifdef CONFIG_INTEL_IOMMU_SVM
#define MAX_NR_PASID_BITS (20)
static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
{
	/*
	 * Convert ecap_pss to extend context entry pts encoding, also
	 * respect the soft pasid_max value set by the iommu.
	 * - number of PASID bits = ecap_pss + 1
	 * - number of PASID table entries = 2^(pts + 5)
	 * Therefore, pts = ecap_pss - 4
	 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
	 */
	if (ecap_pss(iommu->ecap) < 5)
		return 0;

	/* pasid_max is encoded as actual number of entries not the bits */
	return find_first_bit((unsigned long *)&iommu->pasid_max,
			      MAX_NR_PASID_BITS) - 5;
}

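/*
 * Worked example for the encoding above (illustrative): on Kaby Lake,
 * ecap_pss = 0x13 gives 0x13 + 1 = 20 PASID bits, so pasid_max is
 * 1 << 20. find_first_bit() on that value returns 20, and 20 - 5 = 15
 * is the pts value: 2^(15 + 5) = 2^20 PASID-table entries, matching
 * pts = ecap_pss - 4 = 0xf.
 */
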
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
	struct device_domain_info *info;
	struct context_entry *context;
	struct dmar_domain *domain;
	unsigned long flags;
	u64 ctx_lo;
	int ret;

	domain = get_valid_domain_for_dev(sdev->dev);
	if (!domain)
		return -EINVAL;

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -EINVAL;
	info = sdev->dev->archdata.iommu;
	if (!info || !info->pasid_supported)
		goto out;

	context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
	if (WARN_ON(!context))
		goto out;

	ctx_lo = context[0].lo;

	sdev->did = domain->iommu_did[iommu->seq_id];
	sdev->sid = PCI_DEVID(info->bus, info->devfn);

	if (!(ctx_lo & CONTEXT_PASIDE)) {
		if (iommu->pasid_state_table)
			context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
		context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
			intel_iommu_get_pts(iommu);

		wmb();
		/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
		 * extended to permit requests-with-PASID if the PASIDE bit
		 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
		 * however, the PASIDE bit is ignored and requests-with-PASID
		 * are unconditionally blocked, which makes less sense.
		 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
		 * "guest mode" translation types depending on whether ATS
		 * is available or not. Annoyingly, we can't use the new
		 * modes *unless* PASIDE is set. */
		if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
			ctx_lo &= ~CONTEXT_TT_MASK;
			if (info->ats_supported)
				ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
			else
				ctx_lo |= CONTEXT_TT_PT_PASID << 2;
		}
		ctx_lo |= CONTEXT_PASIDE;
		if (iommu->pasid_state_table)
			ctx_lo |= CONTEXT_DINVE;
		if (info->pri_supported)
			ctx_lo |= CONTEXT_PRS;
		context[0].lo = ctx_lo;
		wmb();
		iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
	}

	/* Enable PASID support in the device, if it wasn't already */
	if (!info->pasid_enabled)
		iommu_enable_dev_iotlb(info);

	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}
	ret = 0;

 out:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	if (iommu_dummy(dev)) {
		dev_warn(dev,
			 "No IOMMU translation for device; cannot enable SVM\n");
		return NULL;
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu) {
		dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
		return NULL;
	}

	if (!iommu->pasid_table) {
		dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
		return NULL;
	}

	return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */

const struct iommu_ops intel_iommu_ops = {
	.capable		= intel_iommu_capable,
	.domain_alloc		= intel_iommu_domain_alloc,
	.domain_free		= intel_iommu_domain_free,
	.attach_dev		= intel_iommu_attach_device,
	.detach_dev		= intel_iommu_detach_device,
	.map			= intel_iommu_map,
	.unmap			= intel_iommu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= intel_iommu_iova_to_phys,
	.add_device		= intel_iommu_add_device,
	.remove_device		= intel_iommu_remove_device,
	.get_resv_regions	= intel_iommu_get_resv_regions,
	.put_resv_regions	= intel_iommu_put_resv_regions,
	.device_group		= pci_device_group,
	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
};

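/*
 * Usage sketch (illustrative, not part of this file): after
 * bus_set_iommu() in intel_iommu_init() registers these ops, a user such
 * as VFIO or KVM drives them through the generic IOMMU API:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	iommu_attach_device(dom, dev);	// -> intel_iommu_attach_device()
 *	iommu_map(dom, iova, phys, SZ_2M, IOMMU_READ | IOMMU_WRITE);
 *	iommu_unmap(dom, iova, SZ_2M);	// -> intel_iommu_unmap()
 *	iommu_domain_free(dom);		// -> intel_iommu_domain_free()
 *
 * The core splits each iommu_map() call according to pgsize_bitmap, so
 * .map only ever sees sizes advertised in INTEL_IOMMU_PGSIZES.
 */
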
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

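/*
 * Illustrative decode using the masks above: a raw GGC value of 0x0950
 * has (ggc & GGC_MEMORY_SIZE_MASK) == 0x0900 == GGC_MEMORY_SIZE_2M_VT,
 * i.e. 2MiB of stolen graphics memory with a VT-mode shadow GTT.
 * GGC_MEMORY_VT_ENABLED is bit 11, which is why every *_VT encoding
 * (0x9, 0xa, 0xb) tests true in the quirk below.
 */
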
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
		vtisochctrl);
}
