// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;

static int __init iommu_dma_forcedac_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_forcedac);

	if (!ret && iommu_dma_forcedac)
		pr_info("Forcing DAC for PCI devices\n");
	return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
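
/*
 * Example: with the early_param() above, booting with "iommu.forcedac=1" on
 * the kernel command line sets iommu_dma_forcedac, which suppresses the
 * SAC-first IOVA allocation attempt for PCI devices in iommu_dma_alloc_iova()
 * further down in this file.
 */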

static void iommu_dma_entry_dtor(unsigned long data)
{
	struct page *freelist = (struct page *)data;

	/* Walk the freelist chain and release each page in turn */
	while (freelist) {
		unsigned long p = (unsigned long)page_address(freelist);

		freelist = freelist->freelist;
		free_page(p);
	}
}

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
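
/*
 * Example: a minimal sketch of how an IOMMU driver's domain_alloc callback
 * might pair with the cookie helpers. The "mydrv" structure and function
 * names are illustrative assumptions, not part of this file:
 *
 *	static struct iommu_domain *mydrv_domain_alloc(unsigned int type)
 *	{
 *		struct mydrv_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 *
 * The matching domain_free callback would call iommu_put_dma_cookie()
 * before freeing the domain, as documented below.
 */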

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
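
/*
 * Example: a minimal sketch of how a caller owning an unmanaged domain might
 * set aside an MSI doorbell window. The base and length values are purely
 * illustrative assumptions, not values mandated by this API:
 *
 *	#define MYDRV_MSI_IOVA_BASE	0x8000000
 *	#define MYDRV_MSI_IOVA_LENGTH	0x100000
 *
 *	domain = iommu_domain_alloc(bus);
 *	...
 *	ret = iommu_get_msi_cookie(domain, MYDRV_MSI_IOVA_BASE);
 *
 * The caller must then keep [base, base + length) out of its own IOVA
 * allocator, since iommu_dma_get_msi_page() hands out pages linearly
 * starting from @base.
 */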

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
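
/*
 * Example: a minimal sketch of an IOMMU driver's .get_resv_regions callback
 * built on the helper above. The driver-specific software MSI region and its
 * base/length macros are illustrative assumptions; only the call to
 * iommu_dma_get_resv_regions() is defined by this file:
 *
 *	static void mydrv_get_resv_regions(struct device *dev,
 *					   struct list_head *head)
 *	{
 *		int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(MYDRV_SW_MSI_BASE,
 *						 MYDRV_SW_MSI_LENGTH,
 *						 prot, IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */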

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;

	domain->ops->flush_iotlb_all(domain);
}

static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) &&
	    domain->ops->flush_iotlb_all && !iommu_get_dma_strict(domain)) {
		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
					  iommu_dma_entry_dtor))
			pr_warn("iova flush queue initialization failed\n");
		else
			cookie->fq_domain = domain;
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
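
/*
 * Example: for a cache-coherent device, a DMA_TO_DEVICE mapping translates to
 * IOMMU_READ | IOMMU_CACHE, while a non-coherent DMA_FROM_DEVICE mapping
 * translates to just IOMMU_WRITE; DMA_ATTR_PRIVILEGED additionally ORs in
 * IOMMU_PRIV in either case.
 */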

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
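
/*
 * Example: with a 4KB IOVA granule, a 20KB request gives iova_len = 5; since
 * that is small enough to be eligible for the IOVA range caches, it is
 * rounded up to the next power of two, 8 granules (32KB), so that only
 * power-of-two sizes are ever handed back to the caches on free.
 */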

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct page *freelist)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				(unsigned long)freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
}

static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_addr);
	if (WARN_ON(!phys))
		return;

	__iommu_dma_unmap(dev, dma_addr, size);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size,
				iova_align(iovad, size), dir, attrs);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
		size_t org_size, dma_addr_t dma_mask, bool coherent,
		enum dma_data_direction dir, unsigned long attrs)
{
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t aligned_size = org_size;
	void *padding_start;
	size_t padding_size;
	dma_addr_t iova;

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
	    iova_offset(iovad, phys | org_size)) {
		aligned_size = iova_align(iovad, org_size);
		phys = swiotlb_tbl_map_single(dev, phys, org_size,
					      aligned_size, dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE ||
		     dir == DMA_BIDIRECTIONAL)) {
			padding_start += org_size;
			padding_size -= org_size;
		}

		memset(padding_start, 0, padding_size);
	}

	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
	if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
		swiotlb_tbl_unmap_single(dev, phys, org_size,
				aligned_size, dir, attrs);
	return iova;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/*
 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
			< size)
		goto out_free_sg;

	sgt->sgl->dma_address = iova;
	sgt->sgl->dma_length = size;
	return pages;

out_free_sg:
	sg_free_table(sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct page **pages;
	struct sg_table sgt;
	void *vaddr;

	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
			attrs);
	if (!pages)
		return NULL;
	*dma_handle = sgt.sgl->dma_address;
	sg_free_table(&sgt);
	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, *dma_handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	return NULL;
}

#ifdef CONFIG_DMA_REMAP
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	struct dma_sgt_handle *sh;

	sh = kmalloc(sizeof(*sh), gfp);
	if (!sh)
		return NULL;

	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
						    PAGE_KERNEL, attrs);
	if (!sh->pages) {
		kfree(sh);
		return NULL;
	}
	return &sh->sgt;
}

static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	struct dma_sgt_handle *sh = sgt_handle(sgt);

	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	sg_free_table(&sh->sgt);
}
#endif /* CONFIG_DMA_REMAP */

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(phys))
		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(phys))
		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	for_each_sg(sgl, sg, nelems, i) {
		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);

		if (is_swiotlb_buffer(sg_phys(sg)))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
						dir, SYNC_FOR_CPU);
	}
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	for_each_sg(sgl, sg, nelems, i) {
		if (is_swiotlb_buffer(sg_phys(sg)))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
						dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
	}
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
			coherent, dir, attrs);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
				s->length, dma_get_mask(dev),
				dev_is_dma_coherent(dev), dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	if (dev_is_untrusted(dev))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	if (dev_is_untrusted(dev)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return vm_map_pages(vma, pages, nr_pages);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
#ifdef CONFIG_DMA_REMAP
	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
	.free_noncontiguous	= iommu_dma_free_noncontiguous,
#endif
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
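
/*
 * Example: a minimal sketch, loosely modelled on how arch code wires this up
 * when a device is added behind an IOMMU. The surrounding arch hook shown
 * here is an illustrative assumption; only the iommu_setup_dma_ops() call
 * itself is defined in this file:
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, size);
 *	}
 */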

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
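
/*
 * Example: a minimal sketch of how an MSI irqchip driver might use the two
 * helpers above. The doorbell address macro and callback names are
 * illustrative assumptions:
 *
 *	err = iommu_dma_prepare_msi(desc, MYCHIP_DOORBELL_ADDR);
 *	if (err)
 *		return err;
 *	...
 *	static void mychip_compose_msi_msg(struct irq_data *data,
 *					   struct msi_msg *msg)
 *	{
 *		msg->address_hi = upper_32_bits(MYCHIP_DOORBELL_ADDR);
 *		msg->address_lo = lower_32_bits(MYCHIP_DOORBELL_ADDR);
 *		msg->data = data->hwirq;
 *		iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
 *	}
 *
 * iommu_dma_compose_msi_msg() then rewrites the doorbell address in @msg
 * with the IOVA at which the doorbell was mapped during the prepare step.
 */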

static int iommu_dma_init(void)
{
	if (is_kdump_kernel())
		static_branch_enable(&iommu_deferred_attach_enabled);

	return iova_cache_get();
}
arch_initcall(iommu_dma_init);