// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/cc_platform.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iommu-helper.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/rculist.h>
#include <linux/scatterlist.h>
#include <linux/set_memory.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
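
/*
 * Illustrative note (not part of the original file): with the common
 * IO_TLB_SHIFT of 11, one IO TLB slot (IO_TLB_SIZE) is 2 KiB and a
 * segment of IO_TLB_SEGSIZE (128) slots is 256 KiB, which bounds a
 * single contiguous bounce-buffer allocation.
 */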

/**
 * struct io_tlb_slot - IO TLB slot descriptor
 * @orig_addr:	The original address corresponding to a mapped entry.
 * @alloc_size:	Size of the allocated buffer.
 * @list:	The free list describing the number of free entries available
 *		from each index.
 */
struct io_tlb_slot {
        phys_addr_t orig_addr;
        size_t alloc_size;
        unsigned int list;
};

static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;

#ifdef CONFIG_SWIOTLB_DYNAMIC

static void swiotlb_dyn_alloc(struct work_struct *work);

static struct io_tlb_mem io_tlb_default_mem = {
        .lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
        .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
        .dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
                                        swiotlb_dyn_alloc),
};

#else /* !CONFIG_SWIOTLB_DYNAMIC */

static struct io_tlb_mem io_tlb_default_mem;

#endif /* CONFIG_SWIOTLB_DYNAMIC */

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
static unsigned long default_nareas;

/**
 * struct io_tlb_area - IO TLB memory area descriptor
 *
 * This is a single area with a single lock.
 *
 * @used:	The number of used IO TLB blocks.
 * @index:	The slot index to start searching in this area for next round.
 * @lock:	The lock to protect the above data structures in the map and
 *		unmap calls.
 */
struct io_tlb_area {
        unsigned long used;
        unsigned int index;
        spinlock_t lock;
};

/*
 * Round up the number of slabs to the next power of 2.  The last area is
 * going to be smaller than the rest if default_nslabs is not a power of two.
 * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
 * otherwise a segment may span two or more areas.  This conflicts with the
 * tracking of free contiguous slots: free slots are treated as contiguous
 * no matter whether they cross an area boundary.
 *
 * Return true if default_nslabs is rounded up.
 */
static bool round_up_default_nslabs(void)
{
        if (!default_nareas)
                return false;

        if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
                default_nslabs = IO_TLB_SEGSIZE * default_nareas;
        else if (is_power_of_2(default_nslabs))
                return false;
        default_nslabs = roundup_pow_of_two(default_nslabs);
        return true;
}
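
/*
 * Worked example (illustrative): with default_nareas == 4 and
 * default_nslabs == 1000, the minimum of 4 * IO_TLB_SEGSIZE == 512 slabs
 * is already met and 1000 is not a power of two, so default_nslabs
 * becomes 1024 and the function returns true.
 */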

/**
 * swiotlb_adjust_nareas() - adjust the number of areas and slots
 * @nareas:	Desired number of areas. Zero is treated as 1.
 *
 * Adjust the default number of areas in a memory pool.
 * The default size of the memory pool may also change to meet minimum area
 * size requirements.
 */
static void swiotlb_adjust_nareas(unsigned int nareas)
{
        if (nareas < 1)
                nareas = 1;
        else if (!is_power_of_2(nareas))
                nareas = roundup_pow_of_two(nareas);

        default_nareas = nareas;

        pr_info("area num %d.\n", nareas);
        if (round_up_default_nslabs())
                pr_info("SWIOTLB bounce buffer size rounded up to %luMB",
                        (default_nslabs << IO_TLB_SHIFT) >> 20);
}

/**
 * limit_nareas() - get the maximum number of areas for a given memory pool size
 * @nareas:	Desired number of areas.
 * @nslots:	Total number of slots in the memory pool.
 *
 * Limit the number of areas to the maximum possible number of areas in
 * a memory pool of the given size.
 *
 * Return: Maximum possible number of areas.
 */
static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
{
        if (nslots < nareas * IO_TLB_SEGSIZE)
                return nslots / IO_TLB_SEGSIZE;
        return nareas;
}
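
/*
 * Worked example (illustrative): limit_nareas(32, 2048) finds that
 * 2048 < 32 * IO_TLB_SEGSIZE (4096) and therefore returns
 * 2048 / 128 == 16 areas, so that every area keeps at least one full
 * IO_TLB_SEGSIZE segment.
 */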

static int __init
setup_io_tlb_npages(char *str)
{
        if (isdigit(*str)) {
                /* avoid tail segment of size < IO_TLB_SEGSIZE */
                default_nslabs =
                        ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
        }
        if (*str == ',')
                ++str;
        if (isdigit(*str))
                swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
        if (*str == ',')
                ++str;
        if (!strcmp(str, "force"))
                swiotlb_force_bounce = true;
        else if (!strcmp(str, "noforce"))
                swiotlb_force_disable = true;

        return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
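
/*
 * Illustrative boot-parameter examples (not an exhaustive list):
 *   swiotlb=32768	32768 slabs, i.e. a 64 MiB bounce buffer
 *   swiotlb=32768,4	additionally split the pool into 4 areas
 *   swiotlb=,force	default size, but bounce all DMA traffic
 */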

unsigned long swiotlb_size_or_default(void)
{
        return default_nslabs << IO_TLB_SHIFT;
}

void __init swiotlb_adjust_size(unsigned long size)
{
        /*
         * If the swiotlb parameter has not been specified, give architectures
         * such as those supporting memory encryption a chance to adjust or
         * expand the SWIOTLB size for their use.
         */
        if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
                return;

        size = ALIGN(size, IO_TLB_SIZE);
        default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
        if (round_up_default_nslabs())
                size = default_nslabs << IO_TLB_SHIFT;
        pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

void swiotlb_print_info(void)
{
        struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;

        if (!mem->nslabs) {
                pr_warn("No low mem\n");
                return;
        }

        pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
                (mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
        return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
        return DIV_ROUND_UP(val, IO_TLB_SIZE);
}
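
/*
 * Worked example (illustrative): a 5000-byte request occupies
 * nr_slots(5000) == DIV_ROUND_UP(5000, 2048) == 3 slots, and
 * io_tlb_offset(130) == 130 & 127 == 2 gives a slot's position within
 * its IO_TLB_SEGSIZE segment.
 */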

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
        struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
        unsigned long bytes;

        if (!mem->nslabs || mem->late_alloc)
                return;
        bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
        set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
}

static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
                unsigned long nslabs, bool late_alloc, unsigned int nareas)
{
        void *vaddr = phys_to_virt(start);
        unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

        mem->nslabs = nslabs;
        mem->start = start;
        mem->end = mem->start + bytes;
        mem->late_alloc = late_alloc;
        mem->nareas = nareas;
        mem->area_nslabs = nslabs / mem->nareas;

        for (i = 0; i < mem->nareas; i++) {
                spin_lock_init(&mem->areas[i].lock);
                mem->areas[i].index = 0;
                mem->areas[i].used = 0;
        }

        for (i = 0; i < mem->nslabs; i++) {
                mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
                                         mem->nslabs - i);
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
        }

        memset(vaddr, 0, bytes);
        mem->vaddr = vaddr;
}
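
/*
 * Illustrative free-list state: immediately after initialization of a
 * 256-slab pool, slot 0 has list == 128 (a whole free segment follows),
 * slot 1 has 127, ..., slot 127 has 1, and the pattern repeats for the
 * second segment, so a search can skip ahead by the stored count.
 */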

/**
 * add_mem_pool() - add a memory pool to the allocator
 * @mem:	Software IO TLB allocator.
 * @pool:	Memory pool to be added.
 */
static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
        spin_lock(&mem->lock);
        list_add_rcu(&pool->node, &mem->pools);
        mem->nslabs += pool->nslabs;
        spin_unlock(&mem->lock);
#else
        mem->nslabs = pool->nslabs;
#endif
}

static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
                unsigned int flags,
                int (*remap)(void *tlb, unsigned long nslabs))
{
        size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
        void *tlb;

        /*
         * By default allocate the bounce buffer memory from low memory, but
         * allow picking a location anywhere for hypervisors with guest
         * memory encryption.
         */
        if (flags & SWIOTLB_ANY)
                tlb = memblock_alloc(bytes, PAGE_SIZE);
        else
                tlb = memblock_alloc_low(bytes, PAGE_SIZE);

        if (!tlb) {
                pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
                        __func__, bytes);
                return NULL;
        }

        if (remap && remap(tlb, nslabs) < 0) {
                memblock_free(tlb, PAGE_ALIGN(bytes));
                pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
                return NULL;
        }

        return tlb;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
                int (*remap)(void *tlb, unsigned long nslabs))
{
        struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
        unsigned long nslabs;
        unsigned int nareas;
        size_t alloc_size;
        void *tlb;

        if (!addressing_limit && !swiotlb_force_bounce)
                return;
        if (swiotlb_force_disable)
                return;

        io_tlb_default_mem.force_bounce =
                swiotlb_force_bounce || (flags & SWIOTLB_FORCE);

#ifdef CONFIG_SWIOTLB_DYNAMIC
        if (!remap)
                io_tlb_default_mem.can_grow = true;
        if (flags & SWIOTLB_ANY)
                io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
        else
                io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;
#endif

        if (!default_nareas)
                swiotlb_adjust_nareas(num_possible_cpus());

        nslabs = default_nslabs;
        nareas = limit_nareas(default_nareas, nslabs);
        while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
                if (nslabs <= IO_TLB_MIN_SLABS)
                        return;
                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
                nareas = limit_nareas(nareas, nslabs);
        }

        if (default_nslabs != nslabs) {
                pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
                        default_nslabs, nslabs);
                default_nslabs = nslabs;
        }

        alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
        mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
        if (!mem->slots) {
                pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
                        __func__, alloc_size, PAGE_SIZE);
                return;
        }

        mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
                nareas), SMP_CACHE_BYTES);
        if (!mem->areas) {
                pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
                return;
        }

        swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas);
        add_mem_pool(&io_tlb_default_mem, mem);

        if (flags & SWIOTLB_VERBOSE)
                swiotlb_print_info();
}

void __init swiotlb_init(bool addressing_limit, unsigned int flags)
{
        swiotlb_init_remap(addressing_limit, flags, NULL);
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
                int (*remap)(void *tlb, unsigned long nslabs))
{
        struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
        unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
        unsigned int nareas;
        unsigned char *vstart = NULL;
        unsigned int order, area_order;
        bool retried = false;
        int rc = 0;

        if (io_tlb_default_mem.nslabs)
                return 0;

        if (swiotlb_force_disable)
                return 0;

        io_tlb_default_mem.force_bounce = swiotlb_force_bounce;

#ifdef CONFIG_SWIOTLB_DYNAMIC
        if (!remap)
                io_tlb_default_mem.can_grow = true;
        if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
                io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
        else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
                io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
        else
                io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
#endif

        if (!default_nareas)
                swiotlb_adjust_nareas(num_possible_cpus());

retry:
        order = get_order(nslabs << IO_TLB_SHIFT);
        nslabs = SLABS_PER_PAGE << order;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
                                                  order);
                if (vstart)
                        break;
                order--;
                nslabs = SLABS_PER_PAGE << order;
                retried = true;
        }

        if (!vstart)
                return -ENOMEM;

        if (remap)
                rc = remap(vstart, nslabs);
        if (rc) {
                free_pages((unsigned long)vstart, order);

                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
                if (nslabs < IO_TLB_MIN_SLABS)
                        return rc;
                retried = true;
                goto retry;
        }

        if (retried) {
                pr_warn("only able to allocate %ld MB\n",
                        (PAGE_SIZE << order) >> 20);
        }

        nareas = limit_nareas(default_nareas, nslabs);
        area_order = get_order(array_size(sizeof(*mem->areas), nareas));
        mem->areas = (struct io_tlb_area *)
                __get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
        if (!mem->areas)
                goto error_area;

        mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                get_order(array_size(sizeof(*mem->slots), nslabs)));
        if (!mem->slots)
                goto error_slots;

        set_memory_decrypted((unsigned long)vstart,
                             (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
        swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
                                 nareas);
        add_mem_pool(&io_tlb_default_mem, mem);

        swiotlb_print_info();
        return 0;

error_slots:
        free_pages((unsigned long)mem->areas, area_order);
error_area:
        free_pages((unsigned long)vstart, order);
        return -ENOMEM;
}

void __init swiotlb_exit(void)
{
        struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
        unsigned long tbl_vaddr;
        size_t tbl_size, slots_size;
        unsigned int area_order;

        if (swiotlb_force_bounce)
                return;

        if (!mem->nslabs)
                return;

        pr_info("tearing down default memory pool\n");
        tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
        tbl_size = PAGE_ALIGN(mem->end - mem->start);
        slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

        set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
        if (mem->late_alloc) {
                area_order = get_order(array_size(sizeof(*mem->areas),
                        mem->nareas));
                free_pages((unsigned long)mem->areas, area_order);
                free_pages(tbl_vaddr, get_order(tbl_size));
                free_pages((unsigned long)mem->slots, get_order(slots_size));
        } else {
                memblock_free_late(__pa(mem->areas),
                                   array_size(sizeof(*mem->areas), mem->nareas));
                memblock_free_late(mem->start, tbl_size);
                memblock_free_late(__pa(mem->slots), slots_size);
        }

        memset(mem, 0, sizeof(*mem));
}

#ifdef CONFIG_SWIOTLB_DYNAMIC

/**
 * alloc_dma_pages() - allocate pages to be used for DMA
 * @gfp:	GFP flags for the allocation.
 * @bytes:	Size of the buffer.
 * @phys_limit:	Maximum allowed physical address of the buffer.
 *
 * Allocate pages from the buddy allocator. If successful, make the allocated
 * pages decrypted so that they can be used for DMA.
 *
 * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
 * if the allocated physical address was above @phys_limit.
 */
static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
{
        unsigned int order = get_order(bytes);
        struct page *page;
        phys_addr_t paddr;
        void *vaddr;

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        paddr = page_to_phys(page);
        if (paddr + bytes - 1 > phys_limit) {
                __free_pages(page, order);
                return ERR_PTR(-EAGAIN);
        }

        vaddr = phys_to_virt(paddr);
        if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
                goto error;
        return page;

error:
        /* Intentional leak if pages cannot be encrypted again. */
        if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
                __free_pages(page, order);
        return NULL;
}

/**
 * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
 * @dev:	Device for which a memory pool is allocated.
 * @bytes:	Size of the buffer.
 * @phys_limit:	Maximum allowed physical address of the buffer.
 * @gfp:	GFP flags for the allocation.
 *
 * Return: Allocated pages, or %NULL on allocation failure.
 */
static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
                u64 phys_limit, gfp_t gfp)
{
        struct page *page;

        /*
         * Allocate from the atomic pools if memory is encrypted and
         * the allocation is atomic, because decrypting may block.
         */
        if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {
                void *vaddr;

                if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
                        return NULL;

                return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
                                           dma_coherent_ok);
        }

        gfp &= ~GFP_ZONEMASK;
        if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
                gfp |= __GFP_DMA;
        else if (phys_limit <= DMA_BIT_MASK(32))
                gfp |= __GFP_DMA32;

        while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_limit < DMA_BIT_MASK(64) &&
                    !(gfp & (__GFP_DMA32 | __GFP_DMA)))
                        gfp |= __GFP_DMA32;
                else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
                         !(gfp & __GFP_DMA))
                        gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA;
                else
                        return NULL;
        }

        return page;
}

/**
 * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
 * @vaddr:	Virtual address of the buffer.
 * @bytes:	Size of the buffer.
 */
static void swiotlb_free_tlb(void *vaddr, size_t bytes)
{
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(NULL, vaddr, bytes))
                return;

        /* Intentional leak if pages cannot be encrypted again. */
        if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
                __free_pages(virt_to_page(vaddr), get_order(bytes));
}

/**
 * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
 * @dev:	Device for which a memory pool is allocated.
 * @minslabs:	Minimum number of slabs.
 * @nslabs:	Desired (maximum) number of slabs.
 * @nareas:	Number of areas.
 * @phys_limit:	Maximum DMA buffer physical address.
 * @gfp:	GFP flags for the allocations.
 *
 * Allocate and initialize a new IO TLB memory pool. The actual number of
 * slabs may be reduced if allocation of @nslabs fails. If even
 * @minslabs cannot be allocated, this function fails.
 *
 * Return: New memory pool, or %NULL on allocation failure.
 */
static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
                unsigned long minslabs, unsigned long nslabs,
                unsigned int nareas, u64 phys_limit, gfp_t gfp)
{
        struct io_tlb_pool *pool;
        unsigned int slot_order;
        struct page *tlb;
        size_t pool_size;
        size_t tlb_size;

        if (nslabs > SLABS_PER_PAGE << MAX_PAGE_ORDER) {
                nslabs = SLABS_PER_PAGE << MAX_PAGE_ORDER;
                nareas = limit_nareas(nareas, nslabs);
        }

        pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
        pool = kzalloc(pool_size, gfp);
        if (!pool)
                goto error;
        pool->areas = (void *)pool + sizeof(*pool);

        tlb_size = nslabs << IO_TLB_SHIFT;
        while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
                if (nslabs <= minslabs)
                        goto error_tlb;
                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
                nareas = limit_nareas(nareas, nslabs);
                tlb_size = nslabs << IO_TLB_SHIFT;
        }

        slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
        pool->slots = (struct io_tlb_slot *)
                __get_free_pages(gfp, slot_order);
        if (!pool->slots)
                goto error_slots;

        swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
        return pool;

error_slots:
        swiotlb_free_tlb(page_address(tlb), tlb_size);
error_tlb:
        kfree(pool);
error:
        return NULL;
}

/**
 * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
 * @work:	Pointer to dyn_alloc in struct io_tlb_mem.
 */
static void swiotlb_dyn_alloc(struct work_struct *work)
{
        struct io_tlb_mem *mem =
                container_of(work, struct io_tlb_mem, dyn_alloc);
        struct io_tlb_pool *pool;

        pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
                                  default_nareas, mem->phys_limit, GFP_KERNEL);
        if (!pool) {
                pr_warn_ratelimited("Failed to allocate new pool");
                return;
        }

        add_mem_pool(mem, pool);
}

/**
 * swiotlb_dyn_free() - RCU callback to free a memory pool
 * @rcu:	RCU head in the corresponding struct io_tlb_pool.
 */
static void swiotlb_dyn_free(struct rcu_head *rcu)
{
        struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
        size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
        size_t tlb_size = pool->end - pool->start;

        free_pages((unsigned long)pool->slots, get_order(slots_size));
        swiotlb_free_tlb(pool->vaddr, tlb_size);
        kfree(pool);
}

/**
 * swiotlb_find_pool() - find the IO TLB pool for a physical address
 * @dev:	Device which has mapped the DMA buffer.
 * @paddr:	Physical address within the DMA buffer.
 *
 * Find the IO TLB memory pool descriptor which contains the given physical
 * address, if any.
 *
 * Return: Memory pool which contains @paddr, or %NULL if none.
 */
struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        struct io_tlb_pool *pool;

        rcu_read_lock();
        list_for_each_entry_rcu(pool, &mem->pools, node) {
                if (paddr >= pool->start && paddr < pool->end)
                        goto out;
        }

        list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
                if (paddr >= pool->start && paddr < pool->end)
                        goto out;
        }
        pool = NULL;
out:
        rcu_read_unlock();
        return pool;
}

/**
 * swiotlb_del_pool() - remove an IO TLB pool from a device
 * @dev:	Owning device.
 * @pool:	Memory pool to be removed.
 */
static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
        list_del_rcu(&pool->node);
        spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);

        call_rcu(&pool->rcu, swiotlb_dyn_free);
}

#endif /* CONFIG_SWIOTLB_DYNAMIC */

/**
 * swiotlb_dev_init() - initialize swiotlb fields in &struct device
 * @dev:	Device to be initialized.
 */
void swiotlb_dev_init(struct device *dev)
{
        dev->dma_io_tlb_mem = &io_tlb_default_mem;
#ifdef CONFIG_SWIOTLB_DYNAMIC
        INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
        spin_lock_init(&dev->dma_io_tlb_lock);
        dev->dma_uses_io_tlb = false;
#endif
}

/*
 * Return the offset into an IO TLB slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
        return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}
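
/*
 * Illustrative example: an NVMe-style device with a min_align_mask of
 * 4096 - 1 mapping a buffer at physical address 0x12345678 must see the
 * same offset within a 4 KiB page in the bounce buffer, so
 * swiotlb_align_offset() yields 0x678 & 0xfff & (IO_TLB_SIZE - 1) == 0x678.
 */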

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
                           enum dma_data_direction dir)
{
        struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
        int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = mem->slots[index].orig_addr;
        size_t alloc_size = mem->slots[index].alloc_size;
        unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
        unsigned int tlb_offset, orig_addr_offset;

        if (orig_addr == INVALID_PHYS_ADDR)
                return;

        tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
        orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
        if (tlb_offset < orig_addr_offset) {
                dev_WARN_ONCE(dev, 1,
                        "Access before mapping start detected. orig offset %u, requested offset %u.\n",
                        orig_addr_offset, tlb_offset);
                return;
        }

        tlb_offset -= orig_addr_offset;
        if (tlb_offset > alloc_size) {
                dev_WARN_ONCE(dev, 1,
                        "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
                        alloc_size, size, tlb_offset);
                return;
        }

        orig_addr += tlb_offset;
        alloc_size -= tlb_offset;

        if (size > alloc_size) {
                dev_WARN_ONCE(dev, 1,
                        "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
                        alloc_size, size);
                return;
        }

        if (PageHighMem(pfn_to_page(pfn))) {
                unsigned int offset = orig_addr & ~PAGE_MASK;
                struct page *page;
                unsigned int sz = 0;
                unsigned long flags;

                while (size) {
                        sz = min_t(size_t, PAGE_SIZE - offset, size);

                        local_irq_save(flags);
                        page = pfn_to_page(pfn);
                        if (dir == DMA_TO_DEVICE)
                                memcpy_from_page(vaddr, page, offset, sz);
                        else
                                memcpy_to_page(page, offset, vaddr, sz);
                        local_irq_restore(flags);

                        size -= sz;
                        pfn++;
                        vaddr += sz;
                        offset = 0;
                }
        } else if (dir == DMA_TO_DEVICE) {
                memcpy(vaddr, phys_to_virt(orig_addr), size);
        } else {
                memcpy(phys_to_virt(orig_addr), vaddr, size);
        }
}

static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
        return start + (idx << IO_TLB_SHIFT);
}

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
        return (boundary_mask >> IO_TLB_SHIFT) + 1;
}
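
/*
 * Illustrative example: for a device with a 4 GiB segment boundary,
 * boundary_mask == DMA_BIT_MASK(32) and get_max_slots() returns
 * (0xffffffff >> 11) + 1 == 1 << 21 slots; computing
 * (boundary_mask + 1) >> IO_TLB_SHIFT instead would overflow to 0 when
 * boundary_mask == ~0UL.
 */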

static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index)
{
        if (index >= mem->area_nslabs)
                return 0;
        return index;
}

/*
 * Track the total used slots with a global atomic value in order to have
 * correct information to determine the high water mark. The mem_used()
 * function gives imprecise results because there's no locking across
 * multiple areas.
 */
#ifdef CONFIG_DEBUG_FS
static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
{
        unsigned long old_hiwater, new_used;

        new_used = atomic_long_add_return(nslots, &mem->total_used);
        old_hiwater = atomic_long_read(&mem->used_hiwater);
        do {
                if (new_used <= old_hiwater)
                        break;
        } while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
                                          &old_hiwater, new_used));
}

static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
{
        atomic_long_sub(nslots, &mem->total_used);
}

#else /* !CONFIG_DEBUG_FS */
static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
{
}
static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
{
}
#endif /* CONFIG_DEBUG_FS */

#ifdef CONFIG_SWIOTLB_DYNAMIC
#ifdef CONFIG_DEBUG_FS
static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
        atomic_long_add(nslots, &mem->transient_nslabs);
}

static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
        atomic_long_sub(nslots, &mem->transient_nslabs);
}

#else /* !CONFIG_DEBUG_FS */
static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
}
static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
}
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_SWIOTLB_DYNAMIC */

/**
 * swiotlb_search_pool_area() - search one memory area in one pool
 * @dev:	Device which maps the buffer.
 * @pool:	Memory pool to be searched.
 * @area_index:	Index of the IO TLB memory area to be searched.
 * @orig_addr:	Original (non-bounced) IO buffer address.
 * @alloc_size:	Total requested size of the bounce buffer,
 *		including initial alignment padding.
 * @alloc_align_mask:	Required alignment of the allocated buffer.
 *
 * Find a suitable sequence of IO TLB entries for the request and allocate
 * a buffer from the given IO TLB memory area.
 * This function takes care of locking.
 *
 * Return: Index of the first allocated slot, or -1 on error.
 */
static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool,
                int area_index, phys_addr_t orig_addr, size_t alloc_size,
                unsigned int alloc_align_mask)
{
        struct io_tlb_area *area = pool->areas + area_index;
        unsigned long boundary_mask = dma_get_seg_boundary(dev);
        dma_addr_t tbl_dma_addr =
                phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
        unsigned long max_slots = get_max_slots(boundary_mask);
        unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
        unsigned int nslots = nr_slots(alloc_size), stride;
        unsigned int offset = swiotlb_align_offset(dev, orig_addr);
        unsigned int index, slots_checked, count = 0, i;
        unsigned long flags;
        unsigned int slot_base;
        unsigned int slot_index;

        BUG_ON(!nslots);
        BUG_ON(area_index >= pool->nareas);

        /*
         * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
         * page-aligned in the absence of any other alignment requirements.
         * 'alloc_align_mask' was later introduced to specify the alignment
         * explicitly, however this is passed as zero for streaming mappings
         * and so we preserve the old behaviour there in case any drivers are
         * relying on it.
         */
        if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
                alloc_align_mask = PAGE_SIZE - 1;

        /*
         * Ensure that the allocation is at least slot-aligned and update
         * 'iotlb_align_mask' to ignore bits that will be preserved when
         * offsetting into the allocation.
         */
        alloc_align_mask |= (IO_TLB_SIZE - 1);
        iotlb_align_mask &= ~alloc_align_mask;

        /*
         * For mappings with an alignment requirement don't bother looping to
         * unaligned slots once we found an aligned one.
         */
        stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));

        spin_lock_irqsave(&area->lock, flags);
        if (unlikely(nslots > pool->area_nslabs - area->used))
                goto not_found;

        slot_base = area_index * pool->area_nslabs;
        index = area->index;

        for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
                phys_addr_t tlb_addr;

                slot_index = slot_base + index;
                tlb_addr = slot_addr(tbl_dma_addr, slot_index);

                if ((tlb_addr & alloc_align_mask) ||
                    (orig_addr && (tlb_addr & iotlb_align_mask) !=
                                  (orig_addr & iotlb_align_mask))) {
                        index = wrap_area_index(pool, index + 1);
                        slots_checked++;
                        continue;
                }

                if (!iommu_is_span_boundary(slot_index, nslots,
                                            nr_slots(tbl_dma_addr),
                                            max_slots)) {
                        if (pool->slots[slot_index].list >= nslots)
                                goto found;
                }
                index = wrap_area_index(pool, index + stride);
                slots_checked += stride;
        }

not_found:
        spin_unlock_irqrestore(&area->lock, flags);
        return -1;

found:
        /*
         * If we find a slot that indicates we have 'nslots' number of
         * contiguous buffers, we allocate the buffers from that slot onwards
         * and set the list of free entries to '0' indicating unavailable.
         */
        for (i = slot_index; i < slot_index + nslots; i++) {
                pool->slots[i].list = 0;
                pool->slots[i].alloc_size = alloc_size - (offset +
                                ((i - slot_index) << IO_TLB_SHIFT));
        }
        for (i = slot_index - 1;
             io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
             pool->slots[i].list; i--)
                pool->slots[i].list = ++count;

        /*
         * Update the indices to avoid searching in the next round.
         */
        area->index = wrap_area_index(pool, index + nslots);
        area->used += nslots;
        spin_unlock_irqrestore(&area->lock, flags);

        inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots);
        return slot_index;
}

#ifdef CONFIG_SWIOTLB_DYNAMIC

/**
 * swiotlb_search_area() - search one memory area in all pools
 * @dev:	Device which maps the buffer.
 * @start_cpu:	Start CPU number.
 * @cpu_offset:	Offset from @start_cpu.
 * @orig_addr:	Original (non-bounced) IO buffer address.
 * @alloc_size:	Total requested size of the bounce buffer,
 *		including initial alignment padding.
 * @alloc_align_mask:	Required alignment of the allocated buffer.
 * @retpool:	Used memory pool, updated on return.
 *
 * Search one memory area in all pools for a sequence of slots that match the
 * allocation constraints.
 *
 * Return: Index of the first allocated slot, or -1 on error.
 */
static int swiotlb_search_area(struct device *dev, int start_cpu,
                int cpu_offset, phys_addr_t orig_addr, size_t alloc_size,
                unsigned int alloc_align_mask, struct io_tlb_pool **retpool)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        struct io_tlb_pool *pool;
        int area_index;
        int index = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(pool, &mem->pools, node) {
                if (cpu_offset >= pool->nareas)
                        continue;
                area_index = (start_cpu + cpu_offset) & (pool->nareas - 1);
                index = swiotlb_search_pool_area(dev, pool, area_index,
                                                 orig_addr, alloc_size,
                                                 alloc_align_mask);
                if (index >= 0) {
                        *retpool = pool;
                        break;
                }
        }
        rcu_read_unlock();
        return index;
}

/**
 * swiotlb_find_slots() - search for slots in the whole swiotlb
 * @dev:	Device which maps the buffer.
 * @orig_addr:	Original (non-bounced) IO buffer address.
 * @alloc_size:	Total requested size of the bounce buffer,
 *		including initial alignment padding.
 * @alloc_align_mask:	Required alignment of the allocated buffer.
 * @retpool:	Used memory pool, updated on return.
 *
 * Search through the whole software IO TLB to find a sequence of slots that
 * match the allocation constraints.
 *
 * Return: Index of the first allocated slot, or -1 on error.
 */
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
                size_t alloc_size, unsigned int alloc_align_mask,
                struct io_tlb_pool **retpool)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        struct io_tlb_pool *pool;
        unsigned long nslabs;
        unsigned long flags;
        u64 phys_limit;
        int cpu, i;
        int index;

        if (alloc_size > IO_TLB_SEGSIZE * IO_TLB_SIZE)
                return -1;

        cpu = raw_smp_processor_id();
        for (i = 0; i < default_nareas; ++i) {
                index = swiotlb_search_area(dev, cpu, i, orig_addr, alloc_size,
                                            alloc_align_mask, &pool);
                if (index >= 0)
                        goto found;
        }

        if (!mem->can_grow)
                return -1;

        schedule_work(&mem->dyn_alloc);

        nslabs = nr_slots(alloc_size);
        phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
        pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
                                  GFP_NOWAIT | __GFP_NOWARN);
        if (!pool)
                return -1;

        index = swiotlb_search_pool_area(dev, pool, 0, orig_addr,
                                         alloc_size, alloc_align_mask);
        if (index < 0) {
                swiotlb_dyn_free(&pool->rcu);
                return -1;
        }

        pool->transient = true;
        spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
        list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
        spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
        inc_transient_used(mem, pool->nslabs);

found:
        WRITE_ONCE(dev->dma_uses_io_tlb, true);

        /*
         * The general barrier orders reads and writes against a presumed store
         * of the SWIOTLB buffer address by a device driver (to a driver private
         * data structure). It serves two purposes.
         *
         * First, the store to dev->dma_uses_io_tlb must be ordered before the
         * presumed store. This guarantees that the returned buffer address
         * cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
         *
         * Second, the load from mem->pools must be ordered before the same
         * presumed store. This guarantees that the returned buffer address
         * cannot be observed by another CPU before an update of the RCU list
         * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
         * atomicity).
         *
         * See also the comment in is_swiotlb_buffer().
         */
        smp_mb();

        *retpool = pool;
        return index;
}

#else /* !CONFIG_SWIOTLB_DYNAMIC */

static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
                size_t alloc_size, unsigned int alloc_align_mask,
                struct io_tlb_pool **retpool)
{
        struct io_tlb_pool *pool;
        int start, i;
        int index;

        *retpool = pool = &dev->dma_io_tlb_mem->defpool;
        i = start = raw_smp_processor_id() & (pool->nareas - 1);
        do {
                index = swiotlb_search_pool_area(dev, pool, i, orig_addr,
                                                 alloc_size, alloc_align_mask);
                if (index >= 0)
                        return index;
                if (++i >= pool->nareas)
                        i = 0;
        } while (i != start);
        return -1;
}

#endif /* CONFIG_SWIOTLB_DYNAMIC */

#ifdef CONFIG_DEBUG_FS

/**
 * mem_used() - get number of used slots in an allocator
 * @mem:	Software IO TLB allocator.
 *
 * The result is accurate in this version of the function, because an atomic
 * counter is available if CONFIG_DEBUG_FS is set.
 *
 * Return: Number of used slots.
 */
static unsigned long mem_used(struct io_tlb_mem *mem)
{
        return atomic_long_read(&mem->total_used);
}

#else /* !CONFIG_DEBUG_FS */

/**
 * mem_pool_used() - get number of used slots in a memory pool
 * @pool:	Software IO TLB memory pool.
 *
 * The result is not accurate, see mem_used().
 *
 * Return: Approximate number of used slots.
 */
static unsigned long mem_pool_used(struct io_tlb_pool *pool)
{
        int i;
        unsigned long used = 0;

        for (i = 0; i < pool->nareas; i++)
                used += pool->areas[i].used;
        return used;
}

/**
 * mem_used() - get number of used slots in an allocator
 * @mem:	Software IO TLB allocator.
 *
 * The result is not accurate, because there is no locking of individual
 * areas.
 *
 * Return: Approximate number of used slots.
 */
static unsigned long mem_used(struct io_tlb_mem *mem)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
        struct io_tlb_pool *pool;
        unsigned long used = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(pool, &mem->pools, node)
                used += mem_pool_used(pool);
        rcu_read_unlock();

        return used;
#else
        return mem_pool_used(&mem->defpool);
#endif
}

#endif /* CONFIG_DEBUG_FS */

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
                size_t mapping_size, size_t alloc_size,
                unsigned int alloc_align_mask, enum dma_data_direction dir,
                unsigned long attrs)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        unsigned int offset = swiotlb_align_offset(dev, orig_addr);
        struct io_tlb_pool *pool;
        unsigned int i;
        int index;
        phys_addr_t tlb_addr;

        if (!mem || !mem->nslabs) {
                dev_warn_ratelimited(dev,
                        "No software IO TLB was allocated earlier, so a DMA bounce buffer cannot be provided now");
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

        if (mapping_size > alloc_size) {
                dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
                              mapping_size, alloc_size);
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        index = swiotlb_find_slots(dev, orig_addr,
                                   alloc_size + offset, alloc_align_mask, &pool);
        if (index == -1) {
                if (!(attrs & DMA_ATTR_NO_WARN))
                        dev_warn_ratelimited(dev,
                                "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
                                alloc_size, mem->nslabs, mem_used(mem));
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nr_slots(alloc_size + offset); i++)
                pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
        tlb_addr = slot_addr(pool->start, index) + offset;
        /*
         * When the device is writing memory, i.e. dir == DMA_FROM_DEVICE, copy
         * the original buffer to the TLB buffer before initiating DMA in order
         * to preserve the original's data if the device does a partial write,
         * i.e. if the device doesn't overwrite the entire buffer.  Preserving
         * the original data, even if it's garbage, is necessary to match
         * hardware behavior.  Use of swiotlb is supposed to be transparent,
         * i.e. swiotlb must not corrupt memory by clobbering unwritten bytes.
         */
        swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
        return tlb_addr;
}
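
/*
 * Illustrative call path (not part of this file): a driver calling
 * dma_map_single() on a buffer that the device cannot address ends up
 * here via swiotlb_map(); e.g. a 1500-byte network frame at a physical
 * address above a 32-bit device's DMA mask is copied into one 2 KiB
 * slot, and the device then DMAs to or from the bounce copy.
 */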

static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
        struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
        unsigned long flags;
        unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
        int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
        int nslots = nr_slots(mem->slots[index].alloc_size + offset);
        int aindex = index / mem->area_nslabs;
        struct io_tlb_area *area = &mem->areas[aindex];
        int count, i;

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        BUG_ON(aindex >= mem->nareas);

        spin_lock_irqsave(&area->lock, flags);
        if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
                count = mem->slots[index + nslots].list;
        else
                count = 0;

        /*
         * Step 1: return the slots to the free list, merging the slots with
         * succeeding slots
         */
        for (i = index + nslots - 1; i >= index; i--) {
                mem->slots[i].list = ++count;
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
        }

        /*
         * Step 2: merge the returned slots with the preceding slots, if
         * available (non-zero)
         */
        for (i = index - 1;
             io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
             i--)
                mem->slots[i].list = ++count;
        area->used -= nslots;
        spin_unlock_irqrestore(&area->lock, flags);

        dec_used(dev->dma_io_tlb_mem, nslots);
}

#ifdef CONFIG_SWIOTLB_DYNAMIC

/**
 * swiotlb_del_transient() - delete a transient memory pool
 * @dev:	Device which mapped the buffer.
 * @tlb_addr:	Physical address within a bounce buffer.
 *
 * Check whether the address belongs to a transient SWIOTLB memory pool.
 * If yes, then delete the pool.
 *
 * Return: %true if @tlb_addr belonged to a transient pool that was released.
 */
static bool swiotlb_del_transient(struct device *dev, phys_addr_t tlb_addr)
{
        struct io_tlb_pool *pool;

        pool = swiotlb_find_pool(dev, tlb_addr);
        if (!pool->transient)
                return false;

        dec_used(dev->dma_io_tlb_mem, pool->nslabs);
        swiotlb_del_pool(dev, pool);
        dec_transient_used(dev->dma_io_tlb_mem, pool->nslabs);
        return true;
}

#else /* !CONFIG_SWIOTLB_DYNAMIC */

static inline bool swiotlb_del_transient(struct device *dev,
                                         phys_addr_t tlb_addr)
{
        return false;
}

#endif /* CONFIG_SWIOTLB_DYNAMIC */

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
                              size_t mapping_size, enum dma_data_direction dir,
                              unsigned long attrs)
{
        /*
         * First, sync the memory before unmapping the entry
         */
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

        if (swiotlb_del_transient(dev, tlb_addr))
                return;
        swiotlb_release_slots(dev, tlb_addr);
}

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir)
{
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
        else
                BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir)
{
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
        else
                BUG_ON(dir != DMA_TO_DEVICE);
}
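
/*
 * Illustrative pairing of the sync helpers: for a DMA_FROM_DEVICE
 * mapping, data moves bounce buffer -> original buffer on
 * swiotlb_sync_single_for_cpu() and on unmap; for DMA_TO_DEVICE it moves
 * original buffer -> bounce buffer on swiotlb_sync_single_for_device(),
 * mirroring the dma_sync_single_for_{cpu,device}() API contract.
 */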

/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t swiotlb_addr;
        dma_addr_t dma_addr;

        trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);

        swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
                        attrs);
        if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        /* Ensure that the address returned is DMA'ble */
        dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
        if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
                swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
                        attrs | DMA_ATTR_SKIP_CPU_SYNC);
                dev_WARN_ONCE(dev, 1,
                        "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                        &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                return DMA_MAPPING_ERROR;
        }

        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(swiotlb_addr, size, dir);
        return dma_addr;
}

size_t swiotlb_max_mapping_size(struct device *dev)
{
        int min_align_mask = dma_get_min_align_mask(dev);
        int min_align = 0;

        /*
         * swiotlb_find_slots() skips slots according to
         * min align mask. This affects max mapping size.
         * Take it into account here.
         */
        if (min_align_mask)
                min_align = roundup(min_align_mask, IO_TLB_SIZE);

        return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
}
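
/*
 * Worked example (illustrative): with IO_TLB_SIZE == 2048 and a
 * min_align_mask of 4096 - 1, min_align == roundup(4095, 2048) == 4096,
 * so the maximum mapping shrinks from 256 KiB to 252 KiB.
 */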

/**
 * is_swiotlb_allocated() - check if the default software IO TLB is initialized
 */
bool is_swiotlb_allocated(void)
{
        return io_tlb_default_mem.nslabs;
}

bool is_swiotlb_active(struct device *dev)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

        return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

/**
 * default_swiotlb_base() - get the base address of the default SWIOTLB
 *
 * Get the lowest physical address used by the default software IO TLB pool.
 */
phys_addr_t default_swiotlb_base(void)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
        io_tlb_default_mem.can_grow = false;
#endif
        return io_tlb_default_mem.defpool.start;
}

/**
 * default_swiotlb_limit() - get the address limit of the default SWIOTLB
 *
 * Get the highest physical address used by the default software IO TLB pool.
 */
phys_addr_t default_swiotlb_limit(void)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
        return io_tlb_default_mem.phys_limit;
#else
        return io_tlb_default_mem.defpool.end - 1;
#endif
}

#ifdef CONFIG_DEBUG_FS
#ifdef CONFIG_SWIOTLB_DYNAMIC
static unsigned long mem_transient_used(struct io_tlb_mem *mem)
{
        return atomic_long_read(&mem->transient_nslabs);
}

static int io_tlb_transient_used_get(void *data, u64 *val)
{
        struct io_tlb_mem *mem = data;

        *val = mem_transient_used(mem);
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_transient_used, io_tlb_transient_used_get,
                         NULL, "%llu\n");
#endif /* CONFIG_SWIOTLB_DYNAMIC */

static int io_tlb_used_get(void *data, u64 *val)
{
        struct io_tlb_mem *mem = data;

        *val = mem_used(mem);
        return 0;
}

static int io_tlb_hiwater_get(void *data, u64 *val)
{
        struct io_tlb_mem *mem = data;

        *val = atomic_long_read(&mem->used_hiwater);
        return 0;
}

static int io_tlb_hiwater_set(void *data, u64 val)
{
        struct io_tlb_mem *mem = data;

        /* Only allow setting to zero */
        if (val)
                return -EINVAL;

        atomic_long_set(&mem->used_hiwater, val);
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
                         io_tlb_hiwater_set, "%llu\n");

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
                                         const char *dirname)
{
        atomic_long_set(&mem->total_used, 0);
        atomic_long_set(&mem->used_hiwater, 0);

        mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
        if (!mem->nslabs)
                return;

        debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
        debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
                            &fops_io_tlb_used);
        debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
                            &fops_io_tlb_hiwater);
#ifdef CONFIG_SWIOTLB_DYNAMIC
        atomic_long_set(&mem->transient_nslabs, 0);
        debugfs_create_file("io_tlb_transient_nslabs", 0400, mem->debugfs,
                            mem, &fops_io_tlb_transient_used);
#endif
}

static int __init swiotlb_create_default_debugfs(void)
{
        swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
        return 0;
}

late_initcall(swiotlb_create_default_debugfs);
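
/*
 * Illustrative note: with CONFIG_DEBUG_FS the default pool exposes
 * /sys/kernel/debug/swiotlb/io_tlb_nslabs, io_tlb_used and
 * io_tlb_used_hiwater (plus io_tlb_transient_nslabs when
 * CONFIG_SWIOTLB_DYNAMIC is set); e.g.
 *   # cat /sys/kernel/debug/swiotlb/io_tlb_used
 * reports the current number of used slots.
 */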

#else /* !CONFIG_DEBUG_FS */

static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
                                                const char *dirname)
{
}

#endif /* CONFIG_DEBUG_FS */

#ifdef CONFIG_DMA_RESTRICTED_POOL

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        struct io_tlb_pool *pool;
        phys_addr_t tlb_addr;
        unsigned int align;
        int index;

        if (!mem)
                return NULL;

        align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
        index = swiotlb_find_slots(dev, 0, size, align, &pool);
        if (index == -1)
                return NULL;

        tlb_addr = slot_addr(pool->start, index);
        if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
                dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
                              &tlb_addr);
                swiotlb_release_slots(dev, tlb_addr);
                return NULL;
        }

        return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
        phys_addr_t tlb_addr = page_to_phys(page);

        if (!is_swiotlb_buffer(dev, tlb_addr))
                return false;

        swiotlb_release_slots(dev, tlb_addr);

        return true;
}

static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
                                    struct device *dev)
{
        struct io_tlb_mem *mem = rmem->priv;
        unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

        /* Set the per-device IO TLB area count to one */
        unsigned int nareas = 1;

        if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
                dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
                return -EINVAL;
        }

        /*
         * Since multiple devices can share the same pool, the private data,
         * io_tlb_mem struct, will be initialized by the first device attached
         * to it.
         */
        if (!mem) {
                struct io_tlb_pool *pool;

                mem = kzalloc(sizeof(*mem), GFP_KERNEL);
                if (!mem)
                        return -ENOMEM;
                pool = &mem->defpool;

                pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL);
                if (!pool->slots) {
                        kfree(mem);
                        return -ENOMEM;
                }

                pool->areas = kcalloc(nareas, sizeof(*pool->areas),
                                      GFP_KERNEL);
                if (!pool->areas) {
                        kfree(pool->slots);
                        kfree(mem);
                        return -ENOMEM;
                }

                set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
                                     rmem->size >> PAGE_SHIFT);
                swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
                                         false, nareas);
                mem->force_bounce = true;
                mem->for_alloc = true;
#ifdef CONFIG_SWIOTLB_DYNAMIC
                spin_lock_init(&mem->lock);
#endif
                add_mem_pool(mem, pool);

                rmem->priv = mem;

                swiotlb_create_debugfs_files(mem, rmem->name);
        }

        dev->dma_io_tlb_mem = mem;

        return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
                                        struct device *dev)
{
        dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
        .device_init = rmem_swiotlb_device_init,
        .device_release = rmem_swiotlb_device_release,
};

static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL) ||
            of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
            of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
            of_get_flat_dt_prop(node, "no-map", NULL))
                return -EINVAL;

        rmem->ops = &rmem_swiotlb_ops;
        pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);

        return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
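
/*
 * Illustrative device-tree snippet (an assumption for documentation
 * purposes, not taken from this file) that would bind a device to a
 * restricted DMA pool via its memory-region property:
 *
 *	reserved-memory {
 *		restricted_dma: restricted-dma@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	example_device {
 *		memory-region = <&restricted_dma>;
 *	};
 */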
#endif /* CONFIG_DMA_RESTRICTED_POOL */