// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-noncoherent.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - but still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) the generic code suffices
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
{
        unsigned long order = get_order(size);
        struct page *page;
        phys_addr_t paddr;
        void *kvaddr;
        bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);

        /*
         * The __GFP_HIGHMEM flag is cleared by upper layer functions
         * (in include/linux/dma-mapping.h) so we should never get a
         * __GFP_HIGHMEM page here.
         */
        BUG_ON(gfp & __GFP_HIGHMEM);

        page = alloc_pages(gfp | __GFP_ZERO, order);
        if (!page)
                return NULL;

        /* This is the linear address (0x8000_0000 based) */
        paddr = page_to_phys(page);

        *dma_handle = paddr;

        /*
         * A coherent buffer needs an MMU mapping to enforce non-cachability.
         * kvaddr is the kernel virtual address (0x7000_0000 based).
         */
        if (need_coh) {
                kvaddr = ioremap_nocache(paddr, size);
                if (kvaddr == NULL) {
                        __free_pages(page, order);
                        return NULL;
                }
        } else {
                kvaddr = (void *)(u32)paddr;
        }

        /*
         * Evict any existing L1 and/or L2 lines for the backing page
         * in case it was used earlier as a normal "cached" page.
         * Yeah, this bit us - STAR 9000898266
         *
         * Although the core does call flush_cache_vmap(), it gets kvaddr,
         * hence can't be used to efficiently flush the L1 and/or L2, which
         * need paddr. Currently flush_cache_vmap() nukes the L1 cache
         * completely; that will be optimized in a separate commit.
         */
        if (need_coh)
                dma_cache_wback_inv(paddr, size);

        return kvaddr;
}
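
/*
 * Usage sketch: how a driver-side allocation typically reaches
 * arch_dma_alloc() above and arch_dma_free() below, via the generic
 * dma_alloc_coherent()/dma_free_coherent() API from <linux/dma-mapping.h>.
 * The probe function name and buffer size are hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_probe(struct device *dev)
{
        dma_addr_t dma_handle;
        void *buf;

        /*
         * No DMA_ATTR_NON_CONSISTENT here, so arch_dma_alloc() takes the
         * need_coh path and returns an uncached (ioremap'ed) mapping.
         */
        buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* ... program the device with dma_handle, use buf from the CPU ... */

        dma_free_coherent(dev, PAGE_SIZE, buf, dma_handle);
        return 0;
}
#endif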

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        phys_addr_t paddr = dma_handle;
        struct page *page = virt_to_page(paddr);

        if (!(attrs & DMA_ATTR_NON_CONSISTENT))
                iounmap((void __force __iomem *)vaddr);

        __free_pages(page, get_order(size));
}

long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
                dma_addr_t dma_addr)
{
        return __phys_to_pfn(dma_addr);
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of the direction argument as that is
 * done in upper layer functions (in include/linux/dma-mapping.h)
 */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                dma_cache_wback(paddr, size);
                break;
        case DMA_FROM_DEVICE:
                dma_cache_inv(paddr, size);
                break;
        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(paddr, size);
                break;
        default:
                break;
        }
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                break;
        /* FROM_DEVICE invalidate needed only for speculative CPU prefetch */
        case DMA_FROM_DEVICE:
        case DMA_BIDIRECTIONAL:
                dma_cache_inv(paddr, size);
                break;
        default:
                break;
        }
}
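
/*
 * Usage sketch: a streaming-DMA receive path exercising the table above.
 * For DMA_FROM_DEVICE, both the map and the unmap end up in dma_cache_inv();
 * the unmap-side invalidate discards any lines pulled in by speculative CPU
 * prefetches while the device owned the buffer. The function name is
 * hypothetical; @buf is assumed to be a kmalloc'ed (cached, linear-mapped)
 * buffer.
 */
#if 0	/* example only, never compiled */
static void example_rx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* map == for_device: arch_sync_dma_for_device() invalidates */
        handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, handle))
                return;

        /* ... device DMAs into the buffer ... */

        /* unmap == for_cpu: arch_sync_dma_for_cpu() invalidates again */
        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}
#endif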

/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        /*
         * IOC hardware snoops all DMA traffic keeping the caches consistent
         * with memory - eliding the need for any explicit cache maintenance
         * of DMA buffers
         */
        if (is_isa_arcv2() && ioc_enable && coherent)
                dev->dma_coherent = true;

        dev_info(dev, "use %scoherent DMA ops\n",
                 dev->dma_coherent ? "" : "non");
}
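
/*
 * Usage sketch: a device-tree fragment that would make @coherent true here
 * on an ARCv2 platform with the IOC enabled. Node and compatible strings
 * are made up for illustration:
 *
 *	ethernet@f0003000 {
 *		compatible = "snps,example-ethernet";
 *		reg = <0xf0003000 0x2000>;
 *		dma-coherent;
 *	};
 *
 * With "dma-coherent" present, dev->dma_coherent is set above and the
 * generic DMA code bypasses the arch_sync_dma_*() cache maintenance hooks.
 */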