// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}
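
/*
 * The devres pattern here: dmam_alloc_attrs() below registers
 * dmam_release() with devres, so managed buffers are freed automatically
 * on driver detach; dmam_free_coherent() uses dmam_match() to find and
 * drop the matching devres entry when a driver frees a buffer early by
 * hand.
 */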

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
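
/*
 * Illustrative sketch, not part of the original file: a managed
 * allocation from a driver probe routine ("pdev", "buf" and "dma" are
 * made-up locals). No matching free is needed on error or remove paths.
 *
 *	buf = dmam_alloc_attrs(&pdev->dev, SZ_4K, &dma, GFP_KERNEL, 0);
 *	if (!buf)
 *		return -ENOMEM;
 */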

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (!dev_is_dma_coherent(dev)) {
		unsigned long pfn;

		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		/* If the PFN is not valid, we do not have a struct page */
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
		if (!pfn_valid(pfn))
			return -ENXIO;
		page = pfn_to_page(pfn);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
			attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
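
/*
 * Illustrative sketch, not part of the original file: exporting a
 * coherent buffer as a (single-entry) scatterlist, e.g. for a dma-buf
 * exporter. dma_get_sgtable() is the attrs == 0 wrapper of the above.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */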

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev) ||
	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	     (attrs & DMA_ATTR_NON_CONSISTENT)))
		return prot;
	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
		return arch_dma_mmap_pgprot(dev, prot, attrs);
	return pgprot_noncached(prot);
}
#endif /* CONFIG_MMU */
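
/*
 * In short: coherent devices (and, where the platform supports cache
 * syncing, DMA_ATTR_NON_CONSISTENT allocations) keep normal cacheable
 * attributes; anything else gets an uncached mapping, unless the
 * architecture overrides the choice via arch_dma_mmap_pgprot().
 */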

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		/* If the PFN is not valid, we do not have a struct page */
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
		if (!pfn_valid(pfn))
			return -ENXIO;
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
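
/*
 * Illustrative sketch, not part of the original file: a character-device
 * ->mmap handler exposing a coherent buffer to user space ("foo_state"
 * and its fields are made up).
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_state *st = file->private_data;
 *
 *		return dma_mmap_attrs(st->dev, vma, st->cpu_addr,
 *				      st->dma_handle, st->size, 0);
 *	}
 */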

static u64 dma_default_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
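
/*
 * Worked example, assuming 4 KiB pages: with 1 GiB of RAM, max_pfn is
 * 0x40000, so low_totalram = 0x3ffff000 and high_totalram = 0. fls()
 * returns 30, so the mask becomes (1 << 29) + ((1 << 29) - 1) =
 * 0x3fffffff: the smallest all-ones mask covering every RAM address.
 */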

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);
	return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
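
/*
 * Illustrative sketch, not part of the original file: a driver may use
 * the required mask to skip 64-bit addressing setup when all of RAM is
 * reachable through 32 bits (foo_enable_64bit_rings() is hypothetical).
 *
 *	if (dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32))
 *		foo_enable_64bit_rings(priv);
 */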

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
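
/*
 * Illustrative sketch, not part of the original file: a plain coherent
 * allocation and its matching release (the 4 KiB size is arbitrary).
 *
 *	buf = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL, 0);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, SZ_4K, buf, dma, 0);
 */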

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	dma_check_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
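
/*
 * Illustrative sketch, not part of the original file: the usual
 * probe-time mask negotiation, using the dma_set_mask_and_coherent()
 * helper that sets both the streaming and the coherent mask, and
 * falling back from 64-bit to 32-bit addressing.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */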

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_is_direct(ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
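
/*
 * Illustrative sketch, not part of the original file: capping a driver's
 * per-segment size to what the mapping layer (e.g. swiotlb bouncing) can
 * handle in one mapping.
 *
 *	size_t max_seg = min_t(size_t, dma_max_mapping_size(dev), SZ_1M);
 */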