intel-iommu: dump mappings but don't die on pte already set
[linux-2.6-block.git] / drivers / pci / intel-iommu.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw)	((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)	((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))


/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
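
/*
 * Editor's note (illustrative, not part of the driver): a worked example of
 * the conversions above. VTD_PAGE_SHIFT is 12 (4KiB VT-d pages); on x86 with
 * 4KiB MM pages (PAGE_SHIFT == 12) the shift is 0 and the two pfn spaces
 * coincide. On a larger-page config (PAGE_SHIFT == 14, as on some ia64
 * builds) the shift is 2, so one MM page spans four VT-d pages:
 *
 *	mm_to_dma_pfn(5)  == 5 << 2  == 20
 *	dma_to_mm_pfn(20) == 20 >> 2 == 5
 */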

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		 root->val & VTD_PAGE_MASK) :
		 NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

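/*
 * Illustrative sketch (not compiled): how the helpers above compose a
 * context entry, mirroring what domain_context_mapping_one() does later.
 * 'pgd', 'domain_id' and 'agaw' are assumed to come from an
 * already-initialized dmar_domain.
 */
#if 0
static void example_fill_context(struct context_entry *context,
				 struct dma_pte *pgd, int domain_id, int agaw)
{
	context_set_domain_id(context, domain_id);
	context_set_address_root(context, virt_to_phys(pgd));
	context_set_address_width(context, agaw);
	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(context);
	context_set_present(context);	/* must be last: makes the entry live */
}
#endif
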
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

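/*
 * Illustrative sketch (not compiled): building a last-level PTE with the
 * helpers above. For phys == 0x200000 the result is val == 0x200003
 * (page-frame address | DMA_PTE_READ | DMA_PTE_WRITE).
 */
#if 0
static u64 example_make_pte(u64 phys)
{
	struct dma_pte pte;

	dma_clear_pte(&pte);
	dma_set_pte_pfn(&pte, phys >> VTD_PAGE_SHIFT);
	dma_set_pte_readable(&pte);
	dma_set_pte_writable(&pte);
	return pte.val;
}
#endif
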
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine, more than one devices
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

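/*
 * Example kernel command lines parsed by intel_iommu_setup() above
 * (options combine with commas):
 *
 *	intel_iommu=on
 *	intel_iommu=off
 *	intel_iommu=on,strict		(disable batched IOTLB flushing)
 *	intel_iommu=igfx_off,forcedac
 */
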
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}


static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall back
 * to a smaller supported agaw for iommus that don't support the default one.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns the single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

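/*
 * Worked example for the helpers above (illustrative): a domain with a
 * 48-bit adjusted guest address width has agaw = (48 - 30) / 9 = 2, hence
 * agaw_to_level(2) = 4 page-table levels. Each level consumes
 * LEVEL_STRIDE = 9 bits of the dma pfn:
 *
 *	level 4: pfn bits 35..27	(level_to_offset_bits = 27)
 *	level 3: pfn bits 26..18
 *	level 2: pfn bits 17..9
 *	level 1: pfn bits  8..0
 *
 * pfn_level_offset() extracts those 9-bit indices, and align_to_level()
 * rounds a pfn up to the next level boundary, e.g.
 * align_to_level(0x201, 2) == 0x400 (a level-2 entry spans 512 pfns).
 */
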
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
						       flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table control read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		while (start_pfn <= last_pfn &&
		       (unsigned long)pte >> VTD_PAGE_SHIFT ==
		       (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		}
		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* Only clear this pte/pmd if we're asked to clear its
		   _whole_ range */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp <= last_pfn) {
			pte = dma_pfn_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

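/*
 * Illustrative example for __iommu_flush_iotlb() (editor's note): a
 * page-selective flush of 8 pages starting at DMA address 0x12345000 is
 * issued with size_order = 3, so val_iva = 0x12345000 | 3 -- the address
 * mask rides in the low bits of the 4KiB-aligned invalidate address, per
 * the VT-d IVA register layout. The base address must be naturally aligned
 * to the 2^size_order-page region being flushed.
 */
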
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain selective flush if no PSI support or the size
	 * is too big.
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

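/*
 * Worked example for the mask computation above (illustrative):
 *
 *	pages = 1 -> __roundup_pow_of_two(1) = 1 -> mask = 0 (flush 1 page)
 *	pages = 5 -> __roundup_pow_of_two(5) = 8 -> mask = 3 (flush 8 pages)
 *
 * If the mask exceeds cap_max_amask_val(), the request degrades to a
 * domain-selective (DSI) flush instead of a page-selective one.
 */
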
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}

}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

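/*
 * Worked example (illustrative): guestwidth_to_adjustwidth() rounds gaw up
 * so that (gaw - 12) is a multiple of 9, i.e. to a width a 9-bit-stride
 * page table can express (30, 39, 48, 57, capped at 64):
 *
 *	gaw = 48: r = (48 - 12) % 9 = 0 -> agaw = 48
 *	gaw = 36: r = (36 - 12) % 9 = 6 -> agaw = 36 + 9 - 6 = 39
 */
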
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			set_bit(iommu->seq_id, &domain->iommu_bmp);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages--) {
		if (!sg_res) {
			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
		}
		if (!pte) {
			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
			if (!pte)
				return -ENOMEM;
		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		if (unlikely(dma_pte_addr(pte))) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx)\n",
			       iov_pfn, pte->val);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}
		pte->val = pteval;
		pte++;
		if (!nr_pages ||
		    (unsigned long)pte >> VTD_PAGE_SHIFT !=
		    (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}
		iov_pfn++;
		pteval += VTD_PAGE_SIZE;
		sg_res--;
		if (!sg_res)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

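/*
 * Illustrative sketch (not compiled): mapping one 4KiB page at IOVA 1MiB to
 * physical 2MiB through the helper above. Real callers compute the pfns
 * from an iova allocated out of domain->iovad.
 */
#if 0
static int example_map_one_page(struct dmar_domain *domain)
{
	unsigned long iov_pfn  = 0x100000 >> VTD_PAGE_SHIFT;	/* 0x100 */
	unsigned long phys_pfn = 0x200000 >> VTD_PAGE_SHIFT;	/* 0x200 */

	return domain_pfn_mapping(domain, iov_pfn, phys_pfn, 1,
				  DMA_PTE_READ | DMA_PTE_WRITE);
}
#endif
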
c7151a8d 1718static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
ba395927 1719{
c7151a8d
WH
1720 if (!iommu)
1721 return;
8c11e798
WH
1722
1723 clear_context_table(iommu, bus, devfn);
1724 iommu->flush.flush_context(iommu, 0, 0, 0,
4c25a2c1 1725 DMA_CCMD_GLOBAL_INVL);
1f0ef2aa 1726 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
ba395927
KA
1727}
1728
1729static void domain_remove_dev_info(struct dmar_domain *domain)
1730{
1731 struct device_domain_info *info;
1732 unsigned long flags;
c7151a8d 1733 struct intel_iommu *iommu;
ba395927
KA
1734
1735 spin_lock_irqsave(&device_domain_lock, flags);
1736 while (!list_empty(&domain->devices)) {
1737 info = list_entry(domain->devices.next,
1738 struct device_domain_info, link);
1739 list_del(&info->link);
1740 list_del(&info->global);
1741 if (info->dev)
358dd8ac 1742 info->dev->dev.archdata.iommu = NULL;
ba395927
KA
1743 spin_unlock_irqrestore(&device_domain_lock, flags);
1744
93a23a72 1745 iommu_disable_dev_iotlb(info);
276dbf99 1746 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
c7151a8d 1747 iommu_detach_dev(iommu, info->bus, info->devfn);
ba395927
KA
1748 free_devinfo_mem(info);
1749
1750 spin_lock_irqsave(&device_domain_lock, flags);
1751 }
1752 spin_unlock_irqrestore(&device_domain_lock, flags);
1753}
1754
1755/*
1756 * find_domain
358dd8ac 1757 * Note: we use struct pci_dev->dev.archdata.iommu stores the info
ba395927 1758 */
38717946 1759static struct dmar_domain *
ba395927
KA
1760find_domain(struct pci_dev *pdev)
1761{
1762 struct device_domain_info *info;
1763
1764 /* No lock here, assumes no domain exit in normal case */
358dd8ac 1765 info = pdev->dev.archdata.iommu;
ba395927
KA
1766 if (info)
1767 return info->domain;
1768 return NULL;
1769}
1770
ba395927
KA
1771/* domain is initialized */
1772static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1773{
1774 struct dmar_domain *domain, *found = NULL;
1775 struct intel_iommu *iommu;
1776 struct dmar_drhd_unit *drhd;
1777 struct device_domain_info *info, *tmp;
1778 struct pci_dev *dev_tmp;
1779 unsigned long flags;
1780 int bus = 0, devfn = 0;
276dbf99 1781 int segment;
2c2e2c38 1782 int ret;
ba395927
KA
1783
1784 domain = find_domain(pdev);
1785 if (domain)
1786 return domain;
1787
276dbf99
DW
1788 segment = pci_domain_nr(pdev->bus);
1789
ba395927
KA
1790 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1791 if (dev_tmp) {
1792 if (dev_tmp->is_pcie) {
1793 bus = dev_tmp->subordinate->number;
1794 devfn = 0;
1795 } else {
1796 bus = dev_tmp->bus->number;
1797 devfn = dev_tmp->devfn;
1798 }
1799 spin_lock_irqsave(&device_domain_lock, flags);
1800 list_for_each_entry(info, &device_domain_list, global) {
276dbf99
DW
1801 if (info->segment == segment &&
1802 info->bus == bus && info->devfn == devfn) {
ba395927
KA
1803 found = info->domain;
1804 break;
1805 }
1806 }
1807 spin_unlock_irqrestore(&device_domain_lock, flags);
1808 /* pcie-pci bridge already has a domain, uses it */
1809 if (found) {
1810 domain = found;
1811 goto found_domain;
1812 }
1813 }
1814
2c2e2c38
FY
1815 domain = alloc_domain();
1816 if (!domain)
1817 goto error;
1818
ba395927
KA
1819 /* Allocate new domain for the device */
1820 drhd = dmar_find_matched_drhd_unit(pdev);
1821 if (!drhd) {
1822 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1823 pci_name(pdev));
1824 return NULL;
1825 }
1826 iommu = drhd->iommu;
1827
2c2e2c38
FY
1828 ret = iommu_attach_domain(domain, iommu);
1829 if (ret) {
1830 domain_exit(domain);
ba395927 1831 goto error;
2c2e2c38 1832 }
ba395927
KA
1833
1834 if (domain_init(domain, gaw)) {
1835 domain_exit(domain);
1836 goto error;
1837 }
1838
1839 /* register pcie-to-pci device */
1840 if (dev_tmp) {
1841 info = alloc_devinfo_mem();
1842 if (!info) {
1843 domain_exit(domain);
1844 goto error;
1845 }
276dbf99 1846 info->segment = segment;
ba395927
KA
1847 info->bus = bus;
1848 info->devfn = devfn;
1849 info->dev = NULL;
1850 info->domain = domain;
1851 /* This domain is shared by devices under p2p bridge */
3b5410e7 1852 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
ba395927
KA
1853
1854 /* pcie-to-pci bridge already has a domain, use it */
1855 found = NULL;
1856 spin_lock_irqsave(&device_domain_lock, flags);
1857 list_for_each_entry(tmp, &device_domain_list, global) {
276dbf99
DW
1858 if (tmp->segment == segment &&
1859 tmp->bus == bus && tmp->devfn == devfn) {
ba395927
KA
1860 found = tmp->domain;
1861 break;
1862 }
1863 }
1864 if (found) {
1865 free_devinfo_mem(info);
1866 domain_exit(domain);
1867 domain = found;
1868 } else {
1869 list_add(&info->link, &domain->devices);
1870 list_add(&info->global, &device_domain_list);
1871 }
1872 spin_unlock_irqrestore(&device_domain_lock, flags);
1873 }
1874
1875found_domain:
1876 info = alloc_devinfo_mem();
1877 if (!info)
1878 goto error;
276dbf99 1879 info->segment = segment;
ba395927
KA
1880 info->bus = pdev->bus->number;
1881 info->devfn = pdev->devfn;
1882 info->dev = pdev;
1883 info->domain = domain;
1884 spin_lock_irqsave(&device_domain_lock, flags);
1885 /* somebody else raced us and set it first */
1886 found = find_domain(pdev);
1887 if (found != NULL) {
1888 spin_unlock_irqrestore(&device_domain_lock, flags);
1889 if (found != domain) {
1890 domain_exit(domain);
1891 domain = found;
1892 }
1893 free_devinfo_mem(info);
1894 return domain;
1895 }
1896 list_add(&info->link, &domain->devices);
1897 list_add(&info->global, &device_domain_list);
358dd8ac 1898 pdev->dev.archdata.iommu = info;
ba395927
KA
1899 spin_unlock_irqrestore(&device_domain_lock, flags);
1900 return domain;
1901error:
1902 /* recheck: another thread may have set it meanwhile */
1903 return find_domain(pdev);
1904}
1905
2c2e2c38
FY
1906static int iommu_identity_mapping;
1907
b213203e
DW
1908static int iommu_domain_identity_map(struct dmar_domain *domain,
1909 unsigned long long start,
1910 unsigned long long end)
ba395927 1911{
c5395d5c
DW
1912 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1913 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
1914
1915 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1916 dma_to_mm_pfn(last_vpfn))) {
ba395927 1917 printk(KERN_ERR "IOMMU: reserve iova failed\n");
b213203e 1918 return -ENOMEM;
ba395927
KA
1919 }
1920
c5395d5c
DW
1921 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1922 start, end, domain->id);
ba395927
KA
1923 /*
1924 * The RMRR range might overlap the physical memory range;
1925 * clear it first
1926 */
c5395d5c 1927 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
ba395927 1928
c5395d5c
DW
1929 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1930 last_vpfn - first_vpfn + 1,
61df7443 1931 DMA_PTE_READ|DMA_PTE_WRITE);
b213203e
DW
1932}
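/*
 * Worked example (illustrative, assuming 4KiB VT-d pages): identity-mapping
 * the range [0xd0000000, 0xd00fffff] gives first_vpfn = 0xd0000 and
 * last_vpfn = 0xd00ff, i.e. 256 pages mapped with IOVA == physical address.
 */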
1933
1934static int iommu_prepare_identity_map(struct pci_dev *pdev,
1935 unsigned long long start,
1936 unsigned long long end)
1937{
1938 struct dmar_domain *domain;
1939 int ret;
1940
1941 printk(KERN_INFO
1942 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1943 pci_name(pdev), start, end);
1944
c7ab48d2 1945 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
b213203e
DW
1946 if (!domain)
1947 return -ENOMEM;
1948
1949 ret = iommu_domain_identity_map(domain, start, end);
ba395927
KA
1950 if (ret)
1951 goto error;
1952
1953 /* context entry init */
4ed0d3e6 1954 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
b213203e
DW
1955 if (ret)
1956 goto error;
1957
1958 return 0;
1959
1960 error:
ba395927
KA
1961 domain_exit(domain);
1962 return ret;
ba395927
KA
1963}
1964
1965static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1966 struct pci_dev *pdev)
1967{
358dd8ac 1968 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
ba395927
KA
1969 return 0;
1970 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1971 rmrr->end_address + 1);
1972}
1973
49a0429e
KA
1974#ifdef CONFIG_DMAR_FLOPPY_WA
1975static inline void iommu_prepare_isa(void)
1976{
1977 struct pci_dev *pdev;
1978 int ret;
1979
1980 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1981 if (!pdev)
1982 return;
1983
c7ab48d2 1984 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
49a0429e
KA
1985 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1986
1987 if (ret)
c7ab48d2
DW
1988 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
1989 "floppy might not work\n");
49a0429e
KA
1990
1991}
1992#else
1993static inline void iommu_prepare_isa(void)
1994{
1995 return;
1996}
1997#endif /* !CONFIG_DMAR_FLOPPY_WA */
1998
4ed0d3e6
FY
1999/* Initialize each context entry as pass-through. */
2000static int __init init_context_pass_through(void)
2001{
2002 struct pci_dev *pdev = NULL;
2003 struct dmar_domain *domain;
2004 int ret;
2005
2006 for_each_pci_dev(pdev) {
2007 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2008 ret = domain_context_mapping(domain, pdev,
2009 CONTEXT_TT_PASS_THROUGH);
2010 if (ret)
2011 return ret;
2012 }
2013 return 0;
2014}
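/*
 * Note (observation, not upstream code): get_domain_for_dev() can return
 * NULL on allocation failure, which domain_context_mapping() would then
 * dereference; a defensive caller would check for NULL here.
 */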
2015
2c2e2c38 2016static int md_domain_init(struct dmar_domain *domain, int guest_width);
c7ab48d2
DW
2017
2018static int __init si_domain_work_fn(unsigned long start_pfn,
2019 unsigned long end_pfn, void *datax)
2020{
2021 int *ret = datax;
2022
2023 *ret = iommu_domain_identity_map(si_domain,
2024 (uint64_t)start_pfn << PAGE_SHIFT,
2025 (uint64_t)end_pfn << PAGE_SHIFT);
2026 return *ret;
2027
2028}
2029
2c2e2c38
FY
2030static int si_domain_init(void)
2031{
2032 struct dmar_drhd_unit *drhd;
2033 struct intel_iommu *iommu;
c7ab48d2 2034 int nid, ret = 0;
2c2e2c38
FY
2035
2036 si_domain = alloc_domain();
2037 if (!si_domain)
2038 return -EFAULT;
2039
c7ab48d2 2040 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2c2e2c38
FY
2041
2042 for_each_active_iommu(iommu, drhd) {
2043 ret = iommu_attach_domain(si_domain, iommu);
2044 if (ret) {
2045 domain_exit(si_domain);
2046 return -EFAULT;
2047 }
2048 }
2049
2050 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2051 domain_exit(si_domain);
2052 return -EFAULT;
2053 }
2054
2055 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2056
c7ab48d2
DW
2057 for_each_online_node(nid) {
2058 work_with_active_regions(nid, si_domain_work_fn, &ret);
2059 if (ret)
2060 return ret;
2061 }
2062
2c2e2c38
FY
2063 return 0;
2064}
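/*
 * si_domain is the single static-identity domain: one dmar_domain attached
 * to every active IOMMU whose page tables map each usable RAM page 1:1
 * (IOVA == physical address), built by si_domain_work_fn() above.
 */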
2065
2066static void domain_remove_one_dev_info(struct dmar_domain *domain,
2067 struct pci_dev *pdev);
2068static int identity_mapping(struct pci_dev *pdev)
2069{
2070 struct device_domain_info *info;
2071
2072 if (likely(!iommu_identity_mapping))
2073 return 0;
2074
2075
2076 list_for_each_entry(info, &si_domain->devices, link)
2077 if (info->dev == pdev)
2078 return 1;
2079 return 0;
2080}
2081
2082static int domain_add_dev_info(struct dmar_domain *domain,
2083 struct pci_dev *pdev)
2084{
2085 struct device_domain_info *info;
2086 unsigned long flags;
2087
2088 info = alloc_devinfo_mem();
2089 if (!info)
2090 return -ENOMEM;
2091
2092 info->segment = pci_domain_nr(pdev->bus);
2093 info->bus = pdev->bus->number;
2094 info->devfn = pdev->devfn;
2095 info->dev = pdev;
2096 info->domain = domain;
2097
2098 spin_lock_irqsave(&device_domain_lock, flags);
2099 list_add(&info->link, &domain->devices);
2100 list_add(&info->global, &device_domain_list);
2101 pdev->dev.archdata.iommu = info;
2102 spin_unlock_irqrestore(&device_domain_lock, flags);
2103
2104 return 0;
2105}
2106
2107static int iommu_prepare_static_identity_mapping(void)
2108{
2c2e2c38
FY
2109 struct pci_dev *pdev = NULL;
2110 int ret;
2111
2112 ret = si_domain_init();
2113 if (ret)
2114 return -EFAULT;
2115
2c2e2c38 2116 for_each_pci_dev(pdev) {
c7ab48d2
DW
2117 printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
2118 pci_name(pdev));
2119
2120 ret = domain_context_mapping(si_domain, pdev,
2121 CONTEXT_TT_MULTI_LEVEL);
2122 if (ret)
2123 return ret;
2c2e2c38
FY
2124 ret = domain_add_dev_info(si_domain, pdev);
2125 if (ret)
2126 return ret;
2127 }
2128
2129 return 0;
2130}
2131
2132int __init init_dmars(void)
ba395927
KA
2133{
2134 struct dmar_drhd_unit *drhd;
2135 struct dmar_rmrr_unit *rmrr;
2136 struct pci_dev *pdev;
2137 struct intel_iommu *iommu;
9d783ba0 2138 int i, ret;
4ed0d3e6 2139 int pass_through = 1;
ba395927 2140
2c2e2c38
FY
2141 /*
2142 * In case pass through can not be enabled, iommu tries to use identity
2143 * mapping.
2144 */
2145 if (iommu_pass_through)
2146 iommu_identity_mapping = 1;
2147
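	/*
	 * (Illustrative note: on x86 at this point in time,
	 * iommu_pass_through is normally selected with the "iommu=pt"
	 * kernel boot parameter.)
	 */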
ba395927
KA
2148 /*
2149 * for each drhd
2150 * allocate root
2151 * initialize and program root entry to not present
2152 * endfor
2153 */
2154 for_each_drhd_unit(drhd) {
5e0d2a6f 2155 g_num_of_iommus++;
2156 /*
2157 * lock not needed as this is only incremented in the single-
2158 * threaded kernel __init code path; all other accesses are
2159 * read-only
2160 */
2161 }
2162
d9630fe9
WH
2163 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2164 GFP_KERNEL);
2165 if (!g_iommus) {
2166 printk(KERN_ERR "Allocating global iommu array failed\n");
2167 ret = -ENOMEM;
2168 goto error;
2169 }
2170
80b20dd8 2171 deferred_flush = kzalloc(g_num_of_iommus *
2172 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2173 if (!deferred_flush) {
d9630fe9 2174 kfree(g_iommus);
5e0d2a6f 2175 ret = -ENOMEM;
2176 goto error;
2177 }
2178
5e0d2a6f 2179 for_each_drhd_unit(drhd) {
2180 if (drhd->ignored)
2181 continue;
1886e8a9
SS
2182
2183 iommu = drhd->iommu;
d9630fe9 2184 g_iommus[iommu->seq_id] = iommu;
ba395927 2185
e61d98d8
SS
2186 ret = iommu_init_domains(iommu);
2187 if (ret)
2188 goto error;
2189
ba395927
KA
2190 /*
2191 * TBD:
2192 * we could share the same root & context tables
2193 * among all IOMMUs. Need to split it later.
2194 */
2195 ret = iommu_alloc_root_entry(iommu);
2196 if (ret) {
2197 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2198 goto error;
2199 }
4ed0d3e6
FY
2200 if (!ecap_pass_through(iommu->ecap))
2201 pass_through = 0;
ba395927 2202 }
4ed0d3e6
FY
2203 if (iommu_pass_through)
2204 if (!pass_through) {
2205 printk(KERN_INFO
2206 "Pass Through is not supported by hardware.\n");
2207 iommu_pass_through = 0;
2208 }
ba395927 2209
1531a6a6
SS
2210 /*
2211 * Start from a sane iommu hardware state.
2212 */
a77b67d4
YS
2213 for_each_drhd_unit(drhd) {
2214 if (drhd->ignored)
2215 continue;
2216
2217 iommu = drhd->iommu;
1531a6a6
SS
2218
2219 /*
2220 * If the queued invalidation is already initialized by us
2221 * (for example, while enabling interrupt-remapping) then
2222 * we already have things rolling from a sane state.
2223 */
2224 if (iommu->qi)
2225 continue;
2226
2227 /*
2228 * Clear any previous faults.
2229 */
2230 dmar_fault(-1, iommu);
2231 /*
2232 * Disable queued invalidation if supported and already enabled
2233 * before OS handover.
2234 */
2235 dmar_disable_qi(iommu);
2236 }
2237
2238 for_each_drhd_unit(drhd) {
2239 if (drhd->ignored)
2240 continue;
2241
2242 iommu = drhd->iommu;
2243
a77b67d4
YS
2244 if (dmar_enable_qi(iommu)) {
2245 /*
2246 * Queued Invalidate not enabled, use Register Based
2247 * Invalidate
2248 */
2249 iommu->flush.flush_context = __iommu_flush_context;
2250 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2251 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
b4e0f9eb
FT
2252 "invalidation\n",
2253 (unsigned long long)drhd->reg_base_addr);
a77b67d4
YS
2254 } else {
2255 iommu->flush.flush_context = qi_flush_context;
2256 iommu->flush.flush_iotlb = qi_flush_iotlb;
2257 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
b4e0f9eb
FT
2258 "invalidation\n",
2259 (unsigned long long)drhd->reg_base_addr);
a77b67d4
YS
2260 }
2261 }
2262
ba395927 2263 /*
4ed0d3e6
FY
2264 * If pass through is set and enabled, context entries of all PCI
2265 * devices are initialized with the pass-through translation type.
ba395927 2266 */
4ed0d3e6
FY
2267 if (iommu_pass_through) {
2268 ret = init_context_pass_through();
2269 if (ret) {
2270 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
2271 iommu_pass_through = 0;
ba395927
KA
2272 }
2273 }
2274
ba395927 2275 /*
4ed0d3e6 2276 * If pass through is not set or not enabled, set up context entries for
2c2e2c38
FY
2277 * identity mappings for RMRR, GFX and ISA, possibly falling back to
2278 * static identity mapping if iommu_identity_mapping is set.
ba395927 2279 */
4ed0d3e6 2280 if (!iommu_pass_through) {
2c2e2c38
FY
2281 if (iommu_identity_mapping)
2282 iommu_prepare_static_identity_mapping();
4ed0d3e6
FY
2283 /*
2284 * For each rmrr
2285 * for each dev attached to rmrr
2286 * do
2287 * locate drhd for dev, alloc domain for dev
2288 * allocate free domain
2289 * allocate page table entries for rmrr
2290 * if context not allocated for bus
2291 * allocate and init context
2292 * set present in root table for this bus
2293 * init context with domain, translation etc
2294 * endfor
2295 * endfor
2296 */
2c2e2c38 2297 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
4ed0d3e6
FY
2298 for_each_rmrr_units(rmrr) {
2299 for (i = 0; i < rmrr->devices_cnt; i++) {
2300 pdev = rmrr->devices[i];
2301 /*
2302 * some BIOSes list non-existent devices in the
2303 * DMAR table.
2304 */
2305 if (!pdev)
2306 continue;
2307 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2308 if (ret)
2309 printk(KERN_ERR
ba395927 2310 "IOMMU: mapping reserved region failed\n");
4ed0d3e6 2311 }
ba395927 2312 }
ba395927 2313
4ed0d3e6
FY
2314 iommu_prepare_isa();
2315 }
49a0429e 2316
ba395927
KA
2317 /*
2318 * for each drhd
2319 * enable fault log
2320 * global invalidate context cache
2321 * global invalidate iotlb
2322 * enable translation
2323 */
2324 for_each_drhd_unit(drhd) {
2325 if (drhd->ignored)
2326 continue;
2327 iommu = drhd->iommu;
ba395927
KA
2328
2329 iommu_flush_write_buffer(iommu);
2330
3460a6d9
KA
2331 ret = dmar_set_interrupt(iommu);
2332 if (ret)
2333 goto error;
2334
ba395927
KA
2335 iommu_set_root_entry(iommu);
2336
4c25a2c1 2337 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
1f0ef2aa 2338 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
f8bab735 2339 iommu_disable_protect_mem_regions(iommu);
2340
ba395927
KA
2341 ret = iommu_enable_translation(iommu);
2342 if (ret)
2343 goto error;
2344 }
2345
2346 return 0;
2347error:
2348 for_each_drhd_unit(drhd) {
2349 if (drhd->ignored)
2350 continue;
2351 iommu = drhd->iommu;
2352 free_iommu(iommu);
2353 }
d9630fe9 2354 kfree(g_iommus);
ba395927
KA
2355 return ret;
2356}
2357
88cb6a74
DW
2358static inline unsigned long aligned_nrpages(unsigned long host_addr,
2359 size_t size)
ba395927 2360{
88cb6a74
DW
2361 host_addr &= ~PAGE_MASK;
2362 host_addr += size + PAGE_SIZE - 1;
2363
2364 return host_addr >> VTD_PAGE_SHIFT;
ba395927
KA
2365}
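/*
 * Worked example (illustrative): aligned_nrpages(0x1004, 0x2000) keeps the
 * in-page offset 0x004, adds 0x2000 + 0xfff, and returns 0x3003 >> 12 = 3:
 * an 8KiB buffer starting 4 bytes into a page really spans three 4KiB pages.
 */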
2366
875764de
DW
2367static struct iova *intel_alloc_iova(struct device *dev,
2368 struct dmar_domain *domain,
2369 unsigned long nrpages, uint64_t dma_mask)
ba395927 2370{
ba395927 2371 struct pci_dev *pdev = to_pci_dev(dev);
ba395927 2372 struct iova *iova = NULL;
ba395927 2373
875764de
DW
2374 /* Restrict dma_mask to the width that the iommu can handle */
2375 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2376
2377 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
ba395927
KA
2378 /*
2379 * First try to allocate an I/O virtual address below
284901a9 2380 * DMA_BIT_MASK(32); if that fails, then try allocating
3609801e 2381 * from the higher range
ba395927 2382 */
875764de
DW
2383 iova = alloc_iova(&domain->iovad, nrpages,
2384 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2385 if (iova)
2386 return iova;
2387 }
2388 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2389 if (unlikely(!iova)) {
2390 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2391 nrpages, pci_name(pdev));
f76aec76
KA
2392 return NULL;
2393 }
2394
2395 return iova;
2396}
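/*
 * Preferring the sub-4GiB range is deliberate: 32-bit (SAC) addresses are
 * cheaper for some devices and bridges than 64-bit DAC cycles, so the high
 * range is used only when the low one is exhausted, or immediately when
 * dmar_forcedac is set.
 */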
2397
2398static struct dmar_domain *
2399get_valid_domain_for_dev(struct pci_dev *pdev)
2400{
2401 struct dmar_domain *domain;
2402 int ret;
2403
2404 domain = get_domain_for_dev(pdev,
2405 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2406 if (!domain) {
2407 printk(KERN_ERR
2408 "Allocating domain for %s failed", pci_name(pdev));
4fe05bbc 2409 return NULL;
ba395927
KA
2410 }
2411
2412 /* make sure context mapping is ok */
5331fe6f 2413 if (unlikely(!domain_context_mapped(pdev))) {
4ed0d3e6
FY
2414 ret = domain_context_mapping(domain, pdev,
2415 CONTEXT_TT_MULTI_LEVEL);
f76aec76
KA
2416 if (ret) {
2417 printk(KERN_ERR
2418 "Domain context map for %s failed",
2419 pci_name(pdev));
4fe05bbc 2420 return NULL;
f76aec76 2421 }
ba395927
KA
2422 }
2423
f76aec76
KA
2424 return domain;
2425}
2426
2c2e2c38
FY
2427static int iommu_dummy(struct pci_dev *pdev)
2428{
2429 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2430}
2431
2432/* Check if the pdev needs to go through non-identity map and unmap process.*/
2433static int iommu_no_mapping(struct pci_dev *pdev)
2434{
2435 int found;
2436
2437 if (!iommu_identity_mapping)
2438 return iommu_dummy(pdev);
2439
2440 found = identity_mapping(pdev);
2441 if (found) {
2442 if (pdev->dma_mask > DMA_BIT_MASK(32))
2443 return 1;
2444 else {
2445 /*
2446 * A 32-bit DMA device is removed from si_domain and falls
2447 * back to non-identity mapping.
2448 */
2449 domain_remove_one_dev_info(si_domain, pdev);
2450 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2451 pci_name(pdev));
2452 return 0;
2453 }
2454 } else {
2455 /*
2456 * A 64-bit DMA device detached from a VM is put back into
2457 * si_domain for identity mapping.
2458 */
2459 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2460 int ret;
2461 ret = domain_add_dev_info(si_domain, pdev);
2462 if (!ret) {
2463 printk(KERN_INFO "64bit %s uses identity mapping\n",
2464 pci_name(pdev));
2465 return 1;
2466 }
2467 }
2468 }
2469
2470 return iommu_dummy(pdev);
2471}
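/*
 * Policy summary: with identity mapping enabled, a device that can address
 * 64 bits stays in (or is moved back into) si_domain and is effectively
 * untranslated; a device limited to 32-bit DMA is dropped out of si_domain
 * and gets a real remapping domain instead.
 */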
2472
bb9e6d65
FT
2473static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2474 size_t size, int dir, u64 dma_mask)
f76aec76
KA
2475{
2476 struct pci_dev *pdev = to_pci_dev(hwdev);
f76aec76 2477 struct dmar_domain *domain;
5b6985ce 2478 phys_addr_t start_paddr;
f76aec76
KA
2479 struct iova *iova;
2480 int prot = 0;
6865f0d1 2481 int ret;
8c11e798 2482 struct intel_iommu *iommu;
f76aec76
KA
2483
2484 BUG_ON(dir == DMA_NONE);
2c2e2c38
FY
2485
2486 if (iommu_no_mapping(pdev))
6865f0d1 2487 return paddr;
f76aec76
KA
2488
2489 domain = get_valid_domain_for_dev(pdev);
2490 if (!domain)
2491 return 0;
2492
8c11e798 2493 iommu = domain_get_iommu(domain);
88cb6a74 2494 size = aligned_nrpages(paddr, size);
f76aec76 2495
875764de 2496 iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
f76aec76
KA
2497 if (!iova)
2498 goto error;
2499
ba395927
KA
2500 /*
2501 * Check if DMAR supports zero-length reads on write only
2502 * mappings..
2503 */
2504 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
8c11e798 2505 !cap_zlr(iommu->cap))
ba395927
KA
2506 prot |= DMA_PTE_READ;
2507 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2508 prot |= DMA_PTE_WRITE;
2509 /*
6865f0d1 2510 * [paddr, paddr + size) might cover only part of a page, so we map the
ba395927 2511 * whole page. Note: if two parts of one page are separately mapped, we
6865f0d1 2512 * might have two guest_addrs mapping to the same host paddr, but this
ba395927
KA
2513 * is not a big problem
2514 */
0ab36de2
DW
2515 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2516 paddr >> VTD_PAGE_SHIFT, size, prot);
ba395927
KA
2517 if (ret)
2518 goto error;
2519
1f0ef2aa
DW
2520 /* it's a non-present to present mapping. Only flush if caching mode */
2521 if (cap_caching_mode(iommu->cap))
03d6a246 2522 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
1f0ef2aa 2523 else
8c11e798 2524 iommu_flush_write_buffer(iommu);
f76aec76 2525
03d6a246
DW
2526 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2527 start_paddr += paddr & ~PAGE_MASK;
2528 return start_paddr;
ba395927 2529
ba395927 2530error:
f76aec76
KA
2531 if (iova)
2532 __free_iova(&domain->iovad, iova);
4cf2e75d 2533 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
5b6985ce 2534 pci_name(pdev), size, (unsigned long long)paddr, dir);
ba395927
KA
2535 return 0;
2536}
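/*
 * Illustrative driver-side path (hypothetical pdev/buf/len): a call such as
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *
 * reaches __intel_map_single() via intel_map_page() once intel_dma_ops is
 * installed as the global dma_ops.
 */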
2537
ffbbef5c
FT
2538static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2539 unsigned long offset, size_t size,
2540 enum dma_data_direction dir,
2541 struct dma_attrs *attrs)
bb9e6d65 2542{
ffbbef5c
FT
2543 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2544 dir, to_pci_dev(dev)->dma_mask);
bb9e6d65
FT
2545}
2546
5e0d2a6f 2547static void flush_unmaps(void)
2548{
80b20dd8 2549 int i, j;
5e0d2a6f 2550
5e0d2a6f 2551 timer_on = 0;
2552
2553 /* just flush them all */
2554 for (i = 0; i < g_num_of_iommus; i++) {
a2bb8459
WH
2555 struct intel_iommu *iommu = g_iommus[i];
2556 if (!iommu)
2557 continue;
c42d9f32 2558
9dd2fe89
YZ
2559 if (!deferred_flush[i].next)
2560 continue;
2561
2562 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
93a23a72 2563 DMA_TLB_GLOBAL_FLUSH);
9dd2fe89 2564 for (j = 0; j < deferred_flush[i].next; j++) {
93a23a72
YZ
2565 unsigned long mask;
2566 struct iova *iova = deferred_flush[i].iova[j];
2567
2568 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2569 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2570 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2571 iova->pfn_lo << PAGE_SHIFT, mask);
2572 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
80b20dd8 2573 }
9dd2fe89 2574 deferred_flush[i].next = 0;
5e0d2a6f 2575 }
2576
5e0d2a6f 2577 list_size = 0;
5e0d2a6f 2578}
2579
2580static void flush_unmaps_timeout(unsigned long data)
2581{
80b20dd8 2582 unsigned long flags;
2583
2584 spin_lock_irqsave(&async_umap_flush_lock, flags);
5e0d2a6f 2585 flush_unmaps();
80b20dd8 2586 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
5e0d2a6f 2587}
2588
2589static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2590{
2591 unsigned long flags;
80b20dd8 2592 int next, iommu_id;
8c11e798 2593 struct intel_iommu *iommu;
5e0d2a6f 2594
2595 spin_lock_irqsave(&async_umap_flush_lock, flags);
80b20dd8 2596 if (list_size == HIGH_WATER_MARK)
2597 flush_unmaps();
2598
8c11e798
WH
2599 iommu = domain_get_iommu(dom);
2600 iommu_id = iommu->seq_id;
c42d9f32 2601
80b20dd8 2602 next = deferred_flush[iommu_id].next;
2603 deferred_flush[iommu_id].domain[next] = dom;
2604 deferred_flush[iommu_id].iova[next] = iova;
2605 deferred_flush[iommu_id].next++;
5e0d2a6f 2606
2607 if (!timer_on) {
2608 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2609 timer_on = 1;
2610 }
2611 list_size++;
2612 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2613}
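/*
 * Design note: unmaps are batched; the IOTLB is flushed when a per-iommu
 * queue fills (HIGH_WATER_MARK) or when the 10ms timer fires. Until then,
 * stale translations may remain cached -- the price of avoiding a flush on
 * every unmap (boot with intel_iommu=strict to flush synchronously).
 */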
2614
ffbbef5c
FT
2615static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2616 size_t size, enum dma_data_direction dir,
2617 struct dma_attrs *attrs)
ba395927 2618{
ba395927 2619 struct pci_dev *pdev = to_pci_dev(dev);
f76aec76 2620 struct dmar_domain *domain;
d794dc9b 2621 unsigned long start_pfn, last_pfn;
ba395927 2622 struct iova *iova;
8c11e798 2623 struct intel_iommu *iommu;
ba395927 2624
2c2e2c38 2625 if (iommu_no_mapping(pdev))
f76aec76 2626 return;
2c2e2c38 2627
ba395927
KA
2628 domain = find_domain(pdev);
2629 BUG_ON(!domain);
2630
8c11e798
WH
2631 iommu = domain_get_iommu(domain);
2632
ba395927 2633 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
f76aec76 2634 if (!iova)
ba395927 2635 return;
ba395927 2636
d794dc9b
DW
2637 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2638 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
ba395927 2639
d794dc9b
DW
2640 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2641 pci_name(pdev), start_pfn, last_pfn);
ba395927 2642
f76aec76 2643 /* clear the whole range */
d794dc9b
DW
2644 dma_pte_clear_range(domain, start_pfn, last_pfn);
2645
f76aec76 2646 /* free page tables */
d794dc9b
DW
2647 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2648
5e0d2a6f 2649 if (intel_iommu_strict) {
03d6a246 2650 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
d794dc9b 2651 last_pfn - start_pfn + 1);
5e0d2a6f 2652 /* free iova */
2653 __free_iova(&domain->iovad, iova);
2654 } else {
2655 add_unmap(domain, iova);
2656 /*
2657 * queue up the release of the unmap to save the 1/6th of the
2658 * cpu used up by the iotlb flush operation...
2659 */
5e0d2a6f 2660 }
ba395927
KA
2661}
2662
d7ab5c46
FT
2663static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2664 int dir)
ffbbef5c
FT
2665{
2666 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2667}
2668
d7ab5c46
FT
2669static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2670 dma_addr_t *dma_handle, gfp_t flags)
ba395927
KA
2671{
2672 void *vaddr;
2673 int order;
2674
5b6985ce 2675 size = PAGE_ALIGN(size);
ba395927
KA
2676 order = get_order(size);
2677 flags &= ~(GFP_DMA | GFP_DMA32);
2678
2679 vaddr = (void *)__get_free_pages(flags, order);
2680 if (!vaddr)
2681 return NULL;
2682 memset(vaddr, 0, size);
2683
bb9e6d65
FT
2684 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2685 DMA_BIDIRECTIONAL,
2686 hwdev->coherent_dma_mask);
ba395927
KA
2687 if (*dma_handle)
2688 return vaddr;
2689 free_pages((unsigned long)vaddr, order);
2690 return NULL;
2691}
2692
d7ab5c46
FT
2693static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2694 dma_addr_t dma_handle)
ba395927
KA
2695{
2696 int order;
2697
5b6985ce 2698 size = PAGE_ALIGN(size);
ba395927
KA
2699 order = get_order(size);
2700
2701 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2702 free_pages((unsigned long)vaddr, order);
2703}
2704
d7ab5c46
FT
2705static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2706 int nelems, enum dma_data_direction dir,
2707 struct dma_attrs *attrs)
ba395927 2708{
ba395927
KA
2709 struct pci_dev *pdev = to_pci_dev(hwdev);
2710 struct dmar_domain *domain;
d794dc9b 2711 unsigned long start_pfn, last_pfn;
f76aec76 2712 struct iova *iova;
8c11e798 2713 struct intel_iommu *iommu;
ba395927 2714
2c2e2c38 2715 if (iommu_no_mapping(pdev))
ba395927
KA
2716 return;
2717
2718 domain = find_domain(pdev);
8c11e798
WH
2719 BUG_ON(!domain);
2720
2721 iommu = domain_get_iommu(domain);
ba395927 2722
c03ab37c 2723 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
f76aec76
KA
2724 if (!iova)
2725 return;
f76aec76 2726
d794dc9b
DW
2727 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2728 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
f76aec76
KA
2729
2730 /* clear the whole range */
d794dc9b
DW
2731 dma_pte_clear_range(domain, start_pfn, last_pfn);
2732
f76aec76 2733 /* free page tables */
d794dc9b 2734 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
f76aec76 2735
03d6a246 2736 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
d794dc9b 2737 (last_pfn - start_pfn + 1));
f76aec76
KA
2738
2739 /* free iova */
2740 __free_iova(&domain->iovad, iova);
ba395927
KA
2741}
2742
ba395927 2743static int intel_nontranslate_map_sg(struct device *hddev,
c03ab37c 2744 struct scatterlist *sglist, int nelems, int dir)
ba395927
KA
2745{
2746 int i;
c03ab37c 2747 struct scatterlist *sg;
ba395927 2748
c03ab37c 2749 for_each_sg(sglist, sg, nelems, i) {
12d4d40e 2750 BUG_ON(!sg_page(sg));
4cf2e75d 2751 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
c03ab37c 2752 sg->dma_length = sg->length;
ba395927
KA
2753 }
2754 return nelems;
2755}
2756
d7ab5c46
FT
2757static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2758 enum dma_data_direction dir, struct dma_attrs *attrs)
ba395927 2759{
ba395927 2760 int i;
ba395927
KA
2761 struct pci_dev *pdev = to_pci_dev(hwdev);
2762 struct dmar_domain *domain;
f76aec76
KA
2763 size_t size = 0;
2764 int prot = 0;
f76aec76
KA
2766 struct iova *iova = NULL;
2767 int ret;
c03ab37c 2768 struct scatterlist *sg;
b536d24d 2769 unsigned long start_vpfn;
8c11e798 2770 struct intel_iommu *iommu;
ba395927
KA
2771
2772 BUG_ON(dir == DMA_NONE);
2c2e2c38 2773 if (iommu_no_mapping(pdev))
c03ab37c 2774 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
ba395927 2775
f76aec76
KA
2776 domain = get_valid_domain_for_dev(pdev);
2777 if (!domain)
2778 return 0;
2779
8c11e798
WH
2780 iommu = domain_get_iommu(domain);
2781
b536d24d 2782 for_each_sg(sglist, sg, nelems, i)
88cb6a74 2783 size += aligned_nrpages(sg->offset, sg->length);
f76aec76 2784
875764de 2785 iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
f76aec76 2786 if (!iova) {
c03ab37c 2787 sglist->dma_length = 0;
f76aec76
KA
2788 return 0;
2789 }
2790
2791 /*
2792 * Check if DMAR supports zero-length reads on write only
2793 * mappings..
2794 */
2795 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
8c11e798 2796 !cap_zlr(iommu->cap))
f76aec76
KA
2797 prot |= DMA_PTE_READ;
2798 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2799 prot |= DMA_PTE_WRITE;
2800
b536d24d 2801 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
e1605495
DW
2802
2803 ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
2804 if (unlikely(ret)) {
2805 /* clear the mapped range */
2806 dma_pte_clear_range(domain, start_vpfn,
2807 start_vpfn + size - 1);
2808 /* free page tables */
2809 dma_pte_free_pagetable(domain, start_vpfn,
2810 start_vpfn + size - 1);
2811 /* free iova */
2812 __free_iova(&domain->iovad, iova);
2813 return 0;
ba395927
KA
2814 }
2815
1f0ef2aa
DW
2816 /* it's a non-present to present mapping. Only flush if caching mode */
2817 if (cap_caching_mode(iommu->cap))
03d6a246 2818 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, size);
1f0ef2aa 2819 else
8c11e798 2820 iommu_flush_write_buffer(iommu);
1f0ef2aa 2821
ba395927
KA
2822 return nelems;
2823}
2824
dfb805e8
FT
2825static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2826{
2827 return !dma_addr;
2828}
2829
160c1d8e 2830struct dma_map_ops intel_dma_ops = {
ba395927
KA
2831 .alloc_coherent = intel_alloc_coherent,
2832 .free_coherent = intel_free_coherent,
ba395927
KA
2833 .map_sg = intel_map_sg,
2834 .unmap_sg = intel_unmap_sg,
ffbbef5c
FT
2835 .map_page = intel_map_page,
2836 .unmap_page = intel_unmap_page,
dfb805e8 2837 .mapping_error = intel_mapping_error,
ba395927
KA
2838};
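/*
 * These ops back the generic DMA API (dma_map_page(), dma_map_sg(),
 * dma_alloc_coherent(), ...), so once the global dma_ops points here,
 * unmodified drivers get VT-d translation transparently.
 */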
2839
2840static inline int iommu_domain_cache_init(void)
2841{
2842 int ret = 0;
2843
2844 iommu_domain_cache = kmem_cache_create("iommu_domain",
2845 sizeof(struct dmar_domain),
2846 0,
2847 SLAB_HWCACHE_ALIGN,
2849 NULL);
2850 if (!iommu_domain_cache) {
2851 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2852 ret = -ENOMEM;
2853 }
2854
2855 return ret;
2856}
2857
2858static inline int iommu_devinfo_cache_init(void)
2859{
2860 int ret = 0;
2861
2862 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2863 sizeof(struct device_domain_info),
2864 0,
2865 SLAB_HWCACHE_ALIGN,
ba395927
KA
2866 NULL);
2867 if (!iommu_devinfo_cache) {
2868 printk(KERN_ERR "Couldn't create devinfo cache\n");
2869 ret = -ENOMEM;
2870 }
2871
2872 return ret;
2873}
2874
2875static inline int iommu_iova_cache_init(void)
2876{
2877 int ret = 0;
2878
2879 iommu_iova_cache = kmem_cache_create("iommu_iova",
2880 sizeof(struct iova),
2881 0,
2882 SLAB_HWCACHE_ALIGN,
ba395927
KA
2883 NULL);
2884 if (!iommu_iova_cache) {
2885 printk(KERN_ERR "Couldn't create iova cache\n");
2886 ret = -ENOMEM;
2887 }
2888
2889 return ret;
2890}
2891
2892static int __init iommu_init_mempool(void)
2893{
2894 int ret;
2895 ret = iommu_iova_cache_init();
2896 if (ret)
2897 return ret;
2898
2899 ret = iommu_domain_cache_init();
2900 if (ret)
2901 goto domain_error;
2902
2903 ret = iommu_devinfo_cache_init();
2904 if (!ret)
2905 return ret;
2906
2907 kmem_cache_destroy(iommu_domain_cache);
2908domain_error:
2909 kmem_cache_destroy(iommu_iova_cache);
2910
2911 return -ENOMEM;
2912}
2913
2914static void __init iommu_exit_mempool(void)
2915{
2916 kmem_cache_destroy(iommu_devinfo_cache);
2917 kmem_cache_destroy(iommu_domain_cache);
2918 kmem_cache_destroy(iommu_iova_cache);
2919
2920}
2921
ba395927
KA
2922static void __init init_no_remapping_devices(void)
2923{
2924 struct dmar_drhd_unit *drhd;
2925
2926 for_each_drhd_unit(drhd) {
2927 if (!drhd->include_all) {
2928 int i;
2929 for (i = 0; i < drhd->devices_cnt; i++)
2930 if (drhd->devices[i] != NULL)
2931 break;
2932 /* ignore DMAR unit if no pci devices exist */
2933 if (i == drhd->devices_cnt)
2934 drhd->ignored = 1;
2935 }
2936 }
2937
2938 if (dmar_map_gfx)
2939 return;
2940
2941 for_each_drhd_unit(drhd) {
2942 int i;
2943 if (drhd->ignored || drhd->include_all)
2944 continue;
2945
2946 for (i = 0; i < drhd->devices_cnt; i++)
2947 if (drhd->devices[i] &&
2948 !IS_GFX_DEVICE(drhd->devices[i]))
2949 break;
2950
2951 if (i < drhd->devices_cnt)
2952 continue;
2953
2954 /* bypass IOMMU if it is just for gfx devices */
2955 drhd->ignored = 1;
2956 for (i = 0; i < drhd->devices_cnt; i++) {
2957 if (!drhd->devices[i])
2958 continue;
358dd8ac 2959 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
ba395927
KA
2960 }
2961 }
2962}
2963
f59c7b69
FY
2964#ifdef CONFIG_SUSPEND
2965static int init_iommu_hw(void)
2966{
2967 struct dmar_drhd_unit *drhd;
2968 struct intel_iommu *iommu = NULL;
2969
2970 for_each_active_iommu(iommu, drhd)
2971 if (iommu->qi)
2972 dmar_reenable_qi(iommu);
2973
2974 for_each_active_iommu(iommu, drhd) {
2975 iommu_flush_write_buffer(iommu);
2976
2977 iommu_set_root_entry(iommu);
2978
2979 iommu->flush.flush_context(iommu, 0, 0, 0,
1f0ef2aa 2980 DMA_CCMD_GLOBAL_INVL);
f59c7b69 2981 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
1f0ef2aa 2982 DMA_TLB_GLOBAL_FLUSH);
f59c7b69
FY
2983 iommu_disable_protect_mem_regions(iommu);
2984 iommu_enable_translation(iommu);
2985 }
2986
2987 return 0;
2988}
2989
2990static void iommu_flush_all(void)
2991{
2992 struct dmar_drhd_unit *drhd;
2993 struct intel_iommu *iommu;
2994
2995 for_each_active_iommu(iommu, drhd) {
2996 iommu->flush.flush_context(iommu, 0, 0, 0,
1f0ef2aa 2997 DMA_CCMD_GLOBAL_INVL);
f59c7b69 2998 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
1f0ef2aa 2999 DMA_TLB_GLOBAL_FLUSH);
f59c7b69
FY
3000 }
3001}
3002
3003static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3004{
3005 struct dmar_drhd_unit *drhd;
3006 struct intel_iommu *iommu = NULL;
3007 unsigned long flag;
3008
3009 for_each_active_iommu(iommu, drhd) {
3010 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3011 GFP_ATOMIC);
3012 if (!iommu->iommu_state)
3013 goto nomem;
3014 }
3015
3016 iommu_flush_all();
3017
3018 for_each_active_iommu(iommu, drhd) {
3019 iommu_disable_translation(iommu);
3020
3021 spin_lock_irqsave(&iommu->register_lock, flag);
3022
3023 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3024 readl(iommu->reg + DMAR_FECTL_REG);
3025 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3026 readl(iommu->reg + DMAR_FEDATA_REG);
3027 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3028 readl(iommu->reg + DMAR_FEADDR_REG);
3029 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3030 readl(iommu->reg + DMAR_FEUADDR_REG);
3031
3032 spin_unlock_irqrestore(&iommu->register_lock, flag);
3033 }
3034 return 0;
3035
3036nomem:
3037 for_each_active_iommu(iommu, drhd)
3038 kfree(iommu->iommu_state);
3039
3040 return -ENOMEM;
3041}
3042
3043static int iommu_resume(struct sys_device *dev)
3044{
3045 struct dmar_drhd_unit *drhd;
3046 struct intel_iommu *iommu = NULL;
3047 unsigned long flag;
3048
3049 if (init_iommu_hw()) {
3050 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3051 return -EIO;
3052 }
3053
3054 for_each_active_iommu(iommu, drhd) {
3055
3056 spin_lock_irqsave(&iommu->register_lock, flag);
3057
3058 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3059 iommu->reg + DMAR_FECTL_REG);
3060 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3061 iommu->reg + DMAR_FEDATA_REG);
3062 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3063 iommu->reg + DMAR_FEADDR_REG);
3064 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3065 iommu->reg + DMAR_FEUADDR_REG);
3066
3067 spin_unlock_irqrestore(&iommu->register_lock, flag);
3068 }
3069
3070 for_each_active_iommu(iommu, drhd)
3071 kfree(iommu->iommu_state);
3072
3073 return 0;
3074}
3075
3076static struct sysdev_class iommu_sysclass = {
3077 .name = "iommu",
3078 .resume = iommu_resume,
3079 .suspend = iommu_suspend,
3080};
3081
3082static struct sys_device device_iommu = {
3083 .cls = &iommu_sysclass,
3084};
3085
3086static int __init init_iommu_sysfs(void)
3087{
3088 int error;
3089
3090 error = sysdev_class_register(&iommu_sysclass);
3091 if (error)
3092 return error;
3093
3094 error = sysdev_register(&device_iommu);
3095 if (error)
3096 sysdev_class_unregister(&iommu_sysclass);
3097
3098 return error;
3099}
3100
3101#else
3102static int __init init_iommu_sysfs(void)
3103{
3104 return 0;
3105}
3106#endif /* CONFIG_PM */
3107
ba395927
KA
3108int __init intel_iommu_init(void)
3109{
3110 int ret = 0;
3111
ba395927
KA
3112 if (dmar_table_init())
3113 return -ENODEV;
3114
1886e8a9
SS
3115 if (dmar_dev_scope_init())
3116 return -ENODEV;
3117
2ae21010
SS
3118 /*
3119 * Check the need for DMA-remapping initialization now.
3120 * Above initialization will also be used by Interrupt-remapping.
3121 */
4ed0d3e6 3122 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
2ae21010
SS
3123 return -ENODEV;
3124
ba395927
KA
3125 iommu_init_mempool();
3126 dmar_init_reserved_ranges();
3127
3128 init_no_remapping_devices();
3129
3130 ret = init_dmars();
3131 if (ret) {
3132 printk(KERN_ERR "IOMMU: dmar init failed\n");
3133 put_iova_domain(&reserved_iova_list);
3134 iommu_exit_mempool();
3135 return ret;
3136 }
3137 printk(KERN_INFO
3138 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3139
5e0d2a6f 3140 init_timer(&unmap_timer);
ba395927 3141 force_iommu = 1;
4ed0d3e6
FY
3142
3143 if (!iommu_pass_through) {
3144 printk(KERN_INFO
3145 "Multi-level page-table translation for DMAR.\n");
3146 dma_ops = &intel_dma_ops;
3147 } else
3148 printk(KERN_INFO
3149 "DMAR: Pass through translation for DMAR.\n");
3150
f59c7b69 3151 init_iommu_sysfs();
a8bcbb0d
JR
3152
3153 register_iommu(&intel_iommu_ops);
3154
ba395927
KA
3155 return 0;
3156}
e820482c 3157
3199aa6b
HW
3158static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3159 struct pci_dev *pdev)
3160{
3161 struct pci_dev *tmp, *parent;
3162
3163 if (!iommu || !pdev)
3164 return;
3165
3166 /* dependent device detach */
3167 tmp = pci_find_upstream_pcie_bridge(pdev);
3168 /* Secondary interface's bus number and devfn 0 */
3169 if (tmp) {
3170 parent = pdev->bus->self;
3171 while (parent != tmp) {
3172 iommu_detach_dev(iommu, parent->bus->number,
276dbf99 3173 parent->devfn);
3199aa6b
HW
3174 parent = parent->bus->self;
3175 }
3176 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3177 iommu_detach_dev(iommu,
3178 tmp->subordinate->number, 0);
3179 else /* this is a legacy PCI bridge */
276dbf99
DW
3180 iommu_detach_dev(iommu, tmp->bus->number,
3181 tmp->devfn);
3199aa6b
HW
3182 }
3183}
3184
2c2e2c38 3185static void domain_remove_one_dev_info(struct dmar_domain *domain,
c7151a8d
WH
3186 struct pci_dev *pdev)
3187{
3188 struct device_domain_info *info;
3189 struct intel_iommu *iommu;
3190 unsigned long flags;
3191 int found = 0;
3192 struct list_head *entry, *tmp;
3193
276dbf99
DW
3194 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3195 pdev->devfn);
c7151a8d
WH
3196 if (!iommu)
3197 return;
3198
3199 spin_lock_irqsave(&device_domain_lock, flags);
3200 list_for_each_safe(entry, tmp, &domain->devices) {
3201 info = list_entry(entry, struct device_domain_info, link);
276dbf99 3202 /* No need to compare PCI domain; it has to be the same */
c7151a8d
WH
3203 if (info->bus == pdev->bus->number &&
3204 info->devfn == pdev->devfn) {
3205 list_del(&info->link);
3206 list_del(&info->global);
3207 if (info->dev)
3208 info->dev->dev.archdata.iommu = NULL;
3209 spin_unlock_irqrestore(&device_domain_lock, flags);
3210
93a23a72 3211 iommu_disable_dev_iotlb(info);
c7151a8d 3212 iommu_detach_dev(iommu, info->bus, info->devfn);
3199aa6b 3213 iommu_detach_dependent_devices(iommu, pdev);
c7151a8d
WH
3214 free_devinfo_mem(info);
3215
3216 spin_lock_irqsave(&device_domain_lock, flags);
3217
3218 if (found)
3219 break;
3220 else
3221 continue;
3222 }
3223
3224 /* if there are no other devices under the same iommu
3225 * owned by this domain, clear this iommu from iommu_bmp,
3226 * and update the iommu count and coherency
3227 */
276dbf99
DW
3228 if (iommu == device_to_iommu(info->segment, info->bus,
3229 info->devfn))
c7151a8d
WH
3230 found = 1;
3231 }
3232
3233 if (found == 0) {
3234 unsigned long tmp_flags;
3235 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3236 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3237 domain->iommu_count--;
58c610bd 3238 domain_update_iommu_cap(domain);
c7151a8d
WH
3239 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3240 }
3241
3242 spin_unlock_irqrestore(&device_domain_lock, flags);
3243}
3244
3245static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3246{
3247 struct device_domain_info *info;
3248 struct intel_iommu *iommu;
3249 unsigned long flags1, flags2;
3250
3251 spin_lock_irqsave(&device_domain_lock, flags1);
3252 while (!list_empty(&domain->devices)) {
3253 info = list_entry(domain->devices.next,
3254 struct device_domain_info, link);
3255 list_del(&info->link);
3256 list_del(&info->global);
3257 if (info->dev)
3258 info->dev->dev.archdata.iommu = NULL;
3259
3260 spin_unlock_irqrestore(&device_domain_lock, flags1);
3261
93a23a72 3262 iommu_disable_dev_iotlb(info);
276dbf99 3263 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
c7151a8d 3264 iommu_detach_dev(iommu, info->bus, info->devfn);
3199aa6b 3265 iommu_detach_dependent_devices(iommu, info->dev);
c7151a8d
WH
3266
3267 /* clear this iommu in iommu_bmp, update iommu count
58c610bd 3268 * and capabilities
c7151a8d
WH
3269 */
3270 spin_lock_irqsave(&domain->iommu_lock, flags2);
3271 if (test_and_clear_bit(iommu->seq_id,
3272 &domain->iommu_bmp)) {
3273 domain->iommu_count--;
58c610bd 3274 domain_update_iommu_cap(domain);
c7151a8d
WH
3275 }
3276 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3277
3278 free_devinfo_mem(info);
3279 spin_lock_irqsave(&device_domain_lock, flags1);
3280 }
3281 spin_unlock_irqrestore(&device_domain_lock, flags1);
3282}
3283
5e98c4b1
WH
3284/* domain id for virtual machine, it won't be set in context */
3285static unsigned long vm_domid;
3286
fe40f1e0
WH
3287static int vm_domain_min_agaw(struct dmar_domain *domain)
3288{
3289 int i;
3290 int min_agaw = domain->agaw;
3291
3292 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3293 for (; i < g_num_of_iommus; ) {
3294 if (min_agaw > g_iommus[i]->agaw)
3295 min_agaw = g_iommus[i]->agaw;
3296
3297 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3298 }
3299
3300 return min_agaw;
3301}
3302
5e98c4b1
WH
3303static struct dmar_domain *iommu_alloc_vm_domain(void)
3304{
3305 struct dmar_domain *domain;
3306
3307 domain = alloc_domain_mem();
3308 if (!domain)
3309 return NULL;
3310
3311 domain->id = vm_domid++;
3312 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3313 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3314
3315 return domain;
3316}
3317
2c2e2c38 3318static int md_domain_init(struct dmar_domain *domain, int guest_width)
5e98c4b1
WH
3319{
3320 int adjust_width;
3321
3322 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3323 spin_lock_init(&domain->mapping_lock);
3324 spin_lock_init(&domain->iommu_lock);
3325
3326 domain_reserve_special_ranges(domain);
3327
3328 /* calculate AGAW */
3329 domain->gaw = guest_width;
3330 adjust_width = guestwidth_to_adjustwidth(guest_width);
3331 domain->agaw = width_to_agaw(adjust_width);
3332
3333 INIT_LIST_HEAD(&domain->devices);
3334
3335 domain->iommu_count = 0;
3336 domain->iommu_coherency = 0;
fe40f1e0 3337 domain->max_addr = 0;
5e98c4b1
WH
3338
3339 /* always allocate the top pgd */
3340 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3341 if (!domain->pgd)
3342 return -ENOMEM;
3343 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3344 return 0;
3345}
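/*
 * Example (illustrative): for guest_width = DEFAULT_DOMAIN_ADDRESS_WIDTH
 * (48), guestwidth_to_adjustwidth() rounds up to an AGAW the hardware
 * supports and width_to_agaw() picks the matching page-table depth; a
 * 48-bit address width corresponds to a 4-level table of 4KiB pages.
 */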
3346
3347static void iommu_free_vm_domain(struct dmar_domain *domain)
3348{
3349 unsigned long flags;
3350 struct dmar_drhd_unit *drhd;
3351 struct intel_iommu *iommu;
3352 unsigned long i;
3353 unsigned long ndomains;
3354
3355 for_each_drhd_unit(drhd) {
3356 if (drhd->ignored)
3357 continue;
3358 iommu = drhd->iommu;
3359
3360 ndomains = cap_ndoms(iommu->cap);
3361 i = find_first_bit(iommu->domain_ids, ndomains);
3362 for (; i < ndomains; ) {
3363 if (iommu->domains[i] == domain) {
3364 spin_lock_irqsave(&iommu->lock, flags);
3365 clear_bit(i, iommu->domain_ids);
3366 iommu->domains[i] = NULL;
3367 spin_unlock_irqrestore(&iommu->lock, flags);
3368 break;
3369 }
3370 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3371 }
3372 }
3373}
3374
3375static void vm_domain_exit(struct dmar_domain *domain)
3376{
5e98c4b1
WH
3377 /* Domain 0 is reserved, so don't process it */
3378 if (!domain)
3379 return;
3380
3381 vm_domain_remove_all_dev_info(domain);
3382 /* destroy iovas */
3383 put_iova_domain(&domain->iovad);
5e98c4b1
WH
3384
3385 /* clear ptes */
595badf5 3386 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
5e98c4b1
WH
3387
3388 /* free page tables */
d794dc9b 3389 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
5e98c4b1
WH
3390
3391 iommu_free_vm_domain(domain);
3392 free_domain_mem(domain);
3393}
3394
5d450806 3395static int intel_iommu_domain_init(struct iommu_domain *domain)
38717946 3396{
5d450806 3397 struct dmar_domain *dmar_domain;
38717946 3398
5d450806
JR
3399 dmar_domain = iommu_alloc_vm_domain();
3400 if (!dmar_domain) {
38717946 3401 printk(KERN_ERR
5d450806
JR
3402 "intel_iommu_domain_init: dmar_domain == NULL\n");
3403 return -ENOMEM;
38717946 3404 }
2c2e2c38 3405 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
38717946 3406 printk(KERN_ERR
5d450806
JR
3407 "intel_iommu_domain_init() failed\n");
3408 vm_domain_exit(dmar_domain);
3409 return -ENOMEM;
38717946 3410 }
5d450806 3411 domain->priv = dmar_domain;
faa3d6f5 3412
5d450806 3413 return 0;
38717946 3414}
38717946 3415
5d450806 3416static void intel_iommu_domain_destroy(struct iommu_domain *domain)
38717946 3417{
5d450806
JR
3418 struct dmar_domain *dmar_domain = domain->priv;
3419
3420 domain->priv = NULL;
3421 vm_domain_exit(dmar_domain);
38717946 3422}
38717946 3423
4c5478c9
JR
3424static int intel_iommu_attach_device(struct iommu_domain *domain,
3425 struct device *dev)
38717946 3426{
4c5478c9
JR
3427 struct dmar_domain *dmar_domain = domain->priv;
3428 struct pci_dev *pdev = to_pci_dev(dev);
fe40f1e0
WH
3429 struct intel_iommu *iommu;
3430 int addr_width;
3431 u64 end;
faa3d6f5
WH
3432 int ret;
3433
3434 /* normally pdev is not mapped */
3435 if (unlikely(domain_context_mapped(pdev))) {
3436 struct dmar_domain *old_domain;
3437
3438 old_domain = find_domain(pdev);
3439 if (old_domain) {
2c2e2c38
FY
3440 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3441 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3442 domain_remove_one_dev_info(old_domain, pdev);
faa3d6f5
WH
3443 else
3444 domain_remove_dev_info(old_domain);
3445 }
3446 }
3447
276dbf99
DW
3448 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3449 pdev->devfn);
fe40f1e0
WH
3450 if (!iommu)
3451 return -ENODEV;
3452
3453 /* check if this iommu agaw is sufficient for max mapped address */
3454 addr_width = agaw_to_width(iommu->agaw);
3455 end = DOMAIN_MAX_ADDR(addr_width);
3456 end = end & VTD_PAGE_MASK;
4c5478c9 3457 if (end < dmar_domain->max_addr) {
fe40f1e0
WH
3458 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3459 "sufficient for the mapped address (%llx)\n",
4c5478c9 3460 __func__, iommu->agaw, dmar_domain->max_addr);
fe40f1e0
WH
3461 return -EFAULT;
3462 }
3463
2c2e2c38 3464 ret = domain_add_dev_info(dmar_domain, pdev);
faa3d6f5
WH
3465 if (ret)
3466 return ret;
3467
93a23a72 3468 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
faa3d6f5 3469 return ret;
38717946 3470}
38717946 3471
4c5478c9
JR
3472static void intel_iommu_detach_device(struct iommu_domain *domain,
3473 struct device *dev)
38717946 3474{
4c5478c9
JR
3475 struct dmar_domain *dmar_domain = domain->priv;
3476 struct pci_dev *pdev = to_pci_dev(dev);
3477
2c2e2c38 3478 domain_remove_one_dev_info(dmar_domain, pdev);
faa3d6f5 3479}
c7151a8d 3480
dde57a21
JR
3481static int intel_iommu_map_range(struct iommu_domain *domain,
3482 unsigned long iova, phys_addr_t hpa,
3483 size_t size, int iommu_prot)
faa3d6f5 3484{
dde57a21 3485 struct dmar_domain *dmar_domain = domain->priv;
fe40f1e0
WH
3486 u64 max_addr;
3487 int addr_width;
dde57a21 3488 int prot = 0;
faa3d6f5 3489 int ret;
fe40f1e0 3490
dde57a21
JR
3491 if (iommu_prot & IOMMU_READ)
3492 prot |= DMA_PTE_READ;
3493 if (iommu_prot & IOMMU_WRITE)
3494 prot |= DMA_PTE_WRITE;
9cf06697
SY
3495 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3496 prot |= DMA_PTE_SNP;
dde57a21 3497
163cc52c 3498 max_addr = iova + size;
dde57a21 3499 if (dmar_domain->max_addr < max_addr) {
fe40f1e0
WH
3500 int min_agaw;
3501 u64 end;
3502
3503 /* check if minimum agaw is sufficient for mapped address */
dde57a21 3504 min_agaw = vm_domain_min_agaw(dmar_domain);
fe40f1e0
WH
3505 addr_width = agaw_to_width(min_agaw);
3506 end = DOMAIN_MAX_ADDR(addr_width);
3507 end = end & VTD_PAGE_MASK;
3508 if (end < max_addr) {
3509 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3510 "sufficient for the mapped address (%llx)\n",
3511 __func__, min_agaw, max_addr);
3512 return -EFAULT;
3513 }
dde57a21 3514 dmar_domain->max_addr = max_addr;
fe40f1e0 3515 }
ad051221
DW
3516 /* Round up size to next multiple of PAGE_SIZE, if it and
3517 the low bits of hpa would take us onto the next page */
88cb6a74 3518 size = aligned_nrpages(hpa, size);
ad051221
DW
3519 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3520 hpa >> VTD_PAGE_SHIFT, size, prot);
faa3d6f5 3521 return ret;
38717946 3522}
38717946 3523
dde57a21
JR
3524static void intel_iommu_unmap_range(struct iommu_domain *domain,
3525 unsigned long iova, size_t size)
38717946 3526{
dde57a21 3527 struct dmar_domain *dmar_domain = domain->priv;
faa3d6f5 3528
163cc52c
DW
3529 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3530 (iova + size - 1) >> VTD_PAGE_SHIFT);
fe40f1e0 3531
163cc52c
DW
3532 if (dmar_domain->max_addr == iova + size)
3533 dmar_domain->max_addr = iova;
38717946 3534}
38717946 3535
d14d6577
JR
3536static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3537 unsigned long iova)
38717946 3538{
d14d6577 3539 struct dmar_domain *dmar_domain = domain->priv;
38717946 3540 struct dma_pte *pte;
faa3d6f5 3541 u64 phys = 0;
38717946 3542
b026fd28 3543 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
38717946 3544 if (pte)
faa3d6f5 3545 phys = dma_pte_addr(pte);
38717946 3546
faa3d6f5 3547 return phys;
38717946 3548}
a8bcbb0d 3549
dbb9fd86
SY
3550static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3551 unsigned long cap)
3552{
3553 struct dmar_domain *dmar_domain = domain->priv;
3554
3555 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3556 return dmar_domain->iommu_snooping;
3557
3558 return 0;
3559}
3560
a8bcbb0d
JR
3561static struct iommu_ops intel_iommu_ops = {
3562 .domain_init = intel_iommu_domain_init,
3563 .domain_destroy = intel_iommu_domain_destroy,
3564 .attach_dev = intel_iommu_attach_device,
3565 .detach_dev = intel_iommu_detach_device,
3566 .map = intel_iommu_map_range,
3567 .unmap = intel_iommu_unmap_range,
3568 .iova_to_phys = intel_iommu_iova_to_phys,
dbb9fd86 3569 .domain_has_cap = intel_iommu_domain_has_cap,
a8bcbb0d 3570};
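/*
 * Sketch of a consumer of these ops through the generic IOMMU API of this
 * era (helper names as in include/linux/iommu.h at the time; error
 * handling omitted):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap_range(dom, iova, size);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */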
9af88143
DW
3571
3572static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3573{
3574 /*
3575 * Mobile 4 Series Chipset neglects to set RWBF capability,
3576 * but needs it:
3577 */
3578 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3579 rwbf_quirk = 1;
3580}
3581
3582DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);