// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;
        dma_addr_t      device_base;
        unsigned long   pfn_base;
        int             size;
        unsigned long   *bitmap;
        spinlock_t      spinlock;
        bool            use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
        if (dev && dev->dma_mem)
                return dev->dma_mem;
        return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
                                             struct dma_coherent_mem *mem)
{
        if (mem->use_dev_dma_pfn_offset)
                return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
        else
                return mem->device_base;
}
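
/*
 * Worked example (illustrative sketch, made-up numbers): with
 * mem->pfn_base = 0x58000 and dev->dma_pfn_offset = 0x8000, the base
 * address handed to the device is
 * (0x58000 - 0x8000) << PAGE_SHIFT = 0x50000000 with 4 KiB pages.
 */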

static int dma_init_coherent_memory(phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size,
                struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
        int ret;

        if (!size) {
                ret = -EINVAL;
                goto out;
        }

        mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        if (!mem_base) {
                ret = -EINVAL;
                goto out;
        }
        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem) {
                ret = -ENOMEM;
                goto out;
        }
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap) {
                ret = -ENOMEM;
                goto out;
        }

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return 0;

out:
        kfree(dma_mem);
        if (mem_base)
                memunmap(mem_base);
        return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        memunmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (!dev)
                return -ENODEV;

        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem;
        int ret;

        ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
        if (ret)
                return ret;

        ret = dma_assign_coherent_memory(dev, mem);
        if (ret)
                dma_release_coherent_memory(mem);
        return ret;
}
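
/*
 * Example usage (illustrative sketch, not part of this file; "res" is a
 * hypothetical device-local SRAM resource of the caller): a platform
 * driver can turn such a memory window into a per-device coherent pool:
 *
 *      ret = dma_declare_coherent_memory(&pdev->dev, res->start,
 *                                        res->start, resource_size(res));
 *      if (ret)
 *              return ret;
 *
 * From then on, dma_alloc_coherent(&pdev->dev, ...) is satisfied from
 * this pool via dma_alloc_from_dev_coherent() below.
 */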

static void *__dma_alloc_from_coherent(struct device *dev,
                                       struct dma_coherent_mem *mem,
                                       ssize_t size, dma_addr_t *dma_handle)
{
        int order = get_order(size);
        unsigned long flags;
        int pageno;
        void *ret;

        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the coherent area.
         */
        *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
        ret = mem->virt_base + (pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        memset(ret, 0, size);
        return ret;
err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:        device from which we allocate memory
 * @size:       size of requested memory area
 * @dma_handle: this will be filled with the correct DMA handle
 * @ret:        this pointer will be filled with the virtual address
 *              of the allocated area
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                                dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        if (!mem)
                return 0;

        *ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
        return 1;
}
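
/*
 * Example caller (sketch of the pattern described in the kernel-doc above;
 * surrounding code is elided): the per-device pool is consulted first, and
 * only a missing pool lets the request fall through:
 *
 *      void *vaddr;
 *
 *      if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
 *              return vaddr;   // may be NULL if the pool is exhausted
 *      // ...continue with the generic allocator...
 */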

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                                     dma_addr_t *dma_handle)
{
        if (!dma_coherent_default_memory)
                return NULL;

        return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
                                         dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
                                       int order, void *vaddr)
{
        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:        device from which the memory was allocated
 * @order:      the order of pages allocated
 * @vaddr:      virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_release_from_coherent(mem, order, vaddr);
}
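
/*
 * Example caller (sketch): the mirror image of the allocation path above;
 * generic freeing only runs when the buffer did not come from the pool:
 *
 *      if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 *              return;
 *      // ...otherwise release through the generic allocator...
 */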

int dma_release_from_global_coherent(int order, void *vaddr)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_release_from_coherent(dma_coherent_default_memory, order,
                                           vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                int user_count = vma_pages(vma);
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:        device from which the memory was allocated
 * @vma:        vm_area for the userspace memory
 * @vaddr:      cpu address returned by dma_alloc_from_dev_coherent
 * @size:       size of the memory buffer allocated
 * @ret:        result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                               void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
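
/*
 * Example caller (sketch of a dma_mmap_attrs()-style path): the pool's
 * verdict is final; 0 means fall back to the generic mmap implementation:
 *
 *      int ret;
 *
 *      if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *              return ret;
 *      // ...otherwise map through the generic implementation...
 */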

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
                                  size_t size, int *ret)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
                                        vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;
        int ret;

        if (!mem) {
                ret = dma_init_coherent_memory(rmem->base, rmem->base,
                                               rmem->size, &mem);
                if (ret) {
                        pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                               &rmem->base, (unsigned long)rmem->size / SZ_1M);
                        return ret;
                }
        }
        mem->use_dev_dma_pfn_offset = true;
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        if (dev)
                dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init    = rmem_dma_device_init,
        .device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }

        if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
                WARN(dma_reserved_default_memory,
                     "Reserved memory: region for default DMA coherent area is redefined\n");
                dma_reserved_default_memory = rmem;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}
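
/*
 * Example device-tree usage (sketch; node names, labels and addresses are
 * made up): a "shared-dma-pool" region is declared under /reserved-memory
 * and attached to a consumer through its memory-region property. When the
 * consumer's driver calls of_reserved_mem_device_init(), the reserved-mem
 * core invokes rmem_dma_device_init() above to assign the pool:
 *
 *      reserved-memory {
 *              dma_pool: shared-dma-pool@58000000 {
 *                      compatible = "shared-dma-pool";
 *                      reg = <0x58000000 0x100000>;
 *                      no-map;
 *              };
 *      };
 *
 *      some-device@40000000 {
 *              memory-region = <&dma_pool>;
 *      };
 */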

static int __init dma_init_reserved_memory(void)
{
        const struct reserved_mem_ops *ops;
        int ret;

        if (!dma_reserved_default_memory)
                return -ENOMEM;

        ops = dma_reserved_default_memory->ops;

        /*
         * We rely on rmem_dma_device_init() not propagating the error from
         * dma_assign_coherent_memory() for a NULL device.
         */
        ret = ops->device_init(dma_reserved_default_memory, NULL);

        if (!ret) {
                dma_coherent_default_memory = dma_reserved_default_memory->priv;
                pr_info("DMA: default coherent area is set\n");
        }

        return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif