// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
	return mem->device_base;
}

static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
{
	struct dma_coherent_mem *dma_mem;
	int pages = size >> PAGE_SHIFT;
	void *mem_base;

	if (!size)
		return ERR_PTR(-EINVAL);

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base)
		return ERR_PTR(-EINVAL);

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out_unmap_membase;
	dma_mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out_free_dma_mem;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
	spin_lock_init(&dma_mem->spinlock);

	return dma_mem;

out_free_dma_mem:
	kfree(dma_mem);
out_unmap_membase:
	memunmap(mem_base);
	pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n",
	       &phys_addr, size / SZ_1M);
	return ERR_PTR(-ENOMEM);
}
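
/*
 * Worked example (illustrative, not from the original source): with 4 KiB
 * pages (PAGE_SHIFT == 12), declaring a 1 MiB pool gives
 * pages = 0x100000 >> 12 = 256, so bitmap_zalloc() above allocates a
 * 256-bit (32-byte) map with one bit tracking each page of the pool.
 */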

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	bitmap_free(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device. This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be memremapped with write-combining attributes so the
 * CPU can access the region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
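
/*
 * Usage sketch (hypothetical driver; FOO_SRAM_PHYS, FOO_SRAM_BUS and
 * FOO_SRAM_SIZE are made-up constants describing the same window from the
 * CPU's and the device's point of view). Platform code would hand a
 * device-local memory region to the DMA API like this:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = dma_declare_coherent_memory(&pdev->dev, FOO_SRAM_PHYS,
 *						  FOO_SRAM_BUS, FOO_SRAM_SIZE);
 *		if (ret)
 *			return ret;
 *		// dma_alloc_coherent(&pdev->dev, ...) now carves buffers
 *		// out of this window instead of the generic allocator.
 *		return 0;
 *	}
 */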

static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}
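
/*
 * Call-site sketch (an assumption about the caller, which lives in the
 * common dma_alloc_attrs() path rather than in this file): the non-zero
 * return means "handled", and the buffer, possibly NULL when the pool is
 * exhausted, comes back through the out parameter:
 *
 *	void *cpu_addr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 *		return cpu_addr;
 *	// otherwise fall back to the generic allocator
 */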

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
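
/*
 * Mirror of the allocation path (illustrative): a generic free routine
 * first offers the buffer back to the device pool and only proceeds when
 * the pool disclaims ownership:
 *
 *	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 *		return;
 *	// otherwise release through the generic allocator
 */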

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
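
/*
 * Illustrative call site (hedged; modeled on how dma_mmap_attrs()-style
 * helpers consume this function): a return of 1 means the pool claimed
 * the buffer and *ret already holds the remap_pfn_range() result:
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;
 *	// otherwise map from the generic area
 */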

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
				     dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
					 dma_handle);
}

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
					   vaddr);
}

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
{
	struct dma_coherent_mem *mem;

	mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
	if (IS_ERR(mem))
		return PTR_ERR(mem);
	dma_coherent_default_memory = mem;
	pr_info("DMA: default coherent area is set\n");
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct reserved_mem *dma_reserved_default_memory __initdata;
#endif

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	if (!rmem->priv) {
		struct dma_coherent_mem *mem;

		mem = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, true);
		if (IS_ERR(mem))
			return PTR_ERR(mem);
		rmem->priv = mem;
	}
	dma_assign_coherent_memory(dev, rmem->priv);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

#ifdef CONFIG_DMA_GLOBAL_POOL
	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
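
/*
 * Illustrative (abbreviated) device-tree snippet; node names and addresses
 * are examples, not taken from this file. A node with the "shared-dma-pool"
 * compatible is routed to rmem_dma_setup() via the RESERVEDMEM_OF_DECLARE()
 * below, and a device referencing it through "memory-region" gets
 * rmem_dma_device_init() run when reserved memory is attached to it:
 *
 *	reserved-memory {
 *		dma_pool: dma-pool@40000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x40000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 *	dma-capable-device {
 *		memory-region = <&dma_pool>;
 *	};
 */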

#ifdef CONFIG_DMA_GLOBAL_POOL
static int __init dma_init_reserved_memory(void)
{
	if (!dma_reserved_default_memory)
		return -ENOMEM;
	return dma_init_global_coherent(dma_reserved_default_memory->base,
					dma_reserved_default_memory->size);
}
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif