/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
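
/*
 * Choose the pgprot for a CPU mapping of a DMA buffer: write-combine
 * (Normal non-cacheable) unless the device is cache-coherent and
 * DMA_ATTR_WRITE_COMBINE was not requested.
 */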
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
                                 bool coherent)
{
        if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
                return pgprot_writecombine(prot);
        return prot;
}
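
/*
 * Atomic pool: a small reserve of memory remapped non-cacheable at boot so
 * that non-coherent allocations from atomic context (no __GFP_WAIT) can be
 * served without creating a new mapping.  Defaults to 256 KiB; override
 * with the "coherent_pool=" kernel parameter.
 */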
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);

static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
                memset(ptr, 0, size);
        }

        return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}
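
/*
 * Allocate the backing pages for a coherent buffer: from CMA when an area
 * is available and the caller may sleep, otherwise via swiotlb.  GFP_DMA
 * is forced for devices limited to a 32-bit coherent DMA mask.
 */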
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  struct dma_attrs *attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return NULL;
        }

        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (dev_get_cma_area(dev) && (flags & __GFP_WAIT)) {
                struct page *page;
                void *addr;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size));
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                addr = page_address(page);
                memset(addr, 0, size);
                return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}

static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                struct dma_attrs *attrs)
{
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);

        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }

        freed = dma_release_from_contiguous(dev,
                                            phys_to_page(paddr),
                                            size >> PAGE_SHIFT);
        if (!freed)
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
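
/*
 * The .alloc hook.  Coherent devices get the cacheable linear address back
 * directly; for non-coherent devices the buffer is either carved out of
 * the atomic pool (when sleeping is not allowed) or flushed and remapped
 * non-cacheable via dma_common_contiguous_remap().
 */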
static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
                         struct dma_attrs *attrs)
{
        struct page *page;
        void *ptr, *coherent_ptr;
        bool coherent = is_device_dma_coherent(dev);
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

        size = PAGE_ALIGN(size);

        if (!coherent && !(flags & __GFP_WAIT)) {
                struct page *page = NULL;
                void *addr = __alloc_from_pool(size, &page, flags);

                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));

                return addr;
        }

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;

        /* no need for non-cacheable mapping if coherent */
        if (coherent)
                return ptr;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_range(ptr, ptr + size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot, NULL);
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        *dma_handle = DMA_ERROR_CODE;
        return NULL;
}

static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
                       struct dma_attrs *attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        size = PAGE_ALIGN(size);

        if (!is_device_dma_coherent(dev)) {
                if (__free_from_pool(vaddr, size))
                        return;
                vunmap(vaddr);
        }
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
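
/*
 * The map/unmap/sync callbacks below wrap the generic swiotlb versions,
 * adding the CPU cache maintenance (__dma_map_area()/__dma_unmap_area())
 * that non-coherent devices require around each transfer.
 */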
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, ret, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);

        return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);
}
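
/*
 * Map a coherent buffer into userspace; the vma protections are adjusted
 * first via __get_dma_pgprot() (write-combine for non-coherent devices).
 */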
static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          struct dma_attrs *attrs)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
                                        PAGE_SHIFT;
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}
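
/*
 * Describe a coherent buffer as a single-entry scatterlist (used, for
 * example, when a buffer is exported through dma_get_sgtable()).
 */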
static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                                 void *cpu_addr, dma_addr_t handle, size_t size,
                                 struct dma_attrs *attrs)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
                            PAGE_ALIGN(size), 0);

        return ret;
}
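
/* The default DMA ops, installed system-wide by arm64_dma_init() below. */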
static struct dma_map_ops swiotlb_dma_ops = {
        .alloc = __dma_alloc,
        .free = __dma_free,
        .mmap = __swiotlb_mmap,
        .get_sgtable = __swiotlb_get_sgtable,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};
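
/*
 * Create the atomic pool at boot: take the pages from CMA when available
 * (otherwise alloc_pages(GFP_DMA)), flush the linear alias, and hand a
 * non-cacheable remapping of the area to a gen_pool allocator.
 */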
static int __init atomic_pool_init(void)
{
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
        struct page *page;
        void *addr;
        unsigned int pool_size_order = get_order(atomic_pool_size);

        if (dev_get_cma_area(NULL))
                page = dma_alloc_from_contiguous(NULL, nr_pages,
                                                 pool_size_order);
        else
                page = alloc_pages(GFP_DMA, pool_size_order);

        if (page) {
                int ret;
                void *page_addr = page_address(page);

                memset(page_addr, 0, atomic_pool_size);
                __dma_flush_range(page_addr, page_addr + atomic_pool_size);

                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
                        goto free_page;

                addr = dma_common_contiguous_remap(page, atomic_pool_size,
                                        VM_USERMAP, prot, atomic_pool_init);

                if (!addr)
                        goto destroy_genpool;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto remove_mapping;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  (void *)PAGE_SHIFT);

                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }
        goto out;

remove_mapping:
        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
free_page:
        if (!dma_release_from_contiguous(NULL, page, nr_pages))
                __free_pages(page, pool_size_order);
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
               atomic_pool_size / 1024);
        return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         struct dma_attrs *attrs)
{
}

static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        struct dma_attrs *attrs)
{
        return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               struct dma_attrs *attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
                          struct dma_attrs *attrs)
{
        return 0;
}

static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
                             struct dma_attrs *attrs)
{
}

static void __dummy_sync_single(struct device *dev,
                                dma_addr_t dev_addr, size_t size,
                                enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
                            struct scatterlist *sgl, int nelems,
                            enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
        return 0;
}

struct dma_map_ops dummy_dma_ops = {
        .alloc = __dummy_alloc,
        .free = __dummy_free,
        .mmap = __dummy_mmap,
        .map_page = __dummy_map_page,
        .unmap_page = __dummy_unmap_page,
        .map_sg = __dummy_map_sg,
        .unmap_sg = __dummy_unmap_sg,
        .sync_single_for_cpu = __dummy_sync_single,
        .sync_single_for_device = __dummy_sync_single,
        .sync_sg_for_cpu = __dummy_sync_sg,
        .sync_sg_for_device = __dummy_sync_sg,
        .mapping_error = __dummy_mapping_error,
        .dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);
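
/*
 * Install the swiotlb-backed ops as the arch default and set up the
 * atomic pool.  Runs at arch_initcall level, ahead of normal driver
 * initialisation.
 */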
static int __init arm64_dma_init(void)
{
        int ret;

        dma_ops = &swiotlb_dma_ops;

        ret = atomic_pool_init();

        return ret;
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);