/*
 * arch/arm64/mm/dma-mapping.c
 *
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

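/*
 * arm64 implementation of the DMA mapping API on top of the software
 * IO-TLB (swiotlb): coherent buffers come from CMA or swiotlb, atomic
 * (non-blocking) allocations are served from a small pre-mapped
 * non-cacheable pool, and the streaming operations add the cache
 * maintenance that non-coherent devices require around the swiotlb
 * calls.
 */
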
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

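/*
 * Page protections for mappings handed out to the CPU: non-coherent
 * devices (and DMA_ATTR_WRITE_COMBINE requests) get Normal Non-Cacheable
 * (write-combine) attributes, coherent devices keep @prot unchanged.
 */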
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return pgprot_writecombine(prot);
	return prot;
}

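/*
 * Pool of pre-mapped, non-cacheable memory used to satisfy allocations
 * that cannot sleep. It defaults to 256 KiB and can be resized with the
 * "coherent_pool=<size>" kernel command-line parameter.
 */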
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

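/*
 * Carve a buffer out of the atomic pool: the returned pointer is a
 * zeroed, non-cacheable mapping and the backing page is reported via
 * @ret_page. __free_from_pool() only accepts addresses that fall inside
 * the pool and tells the caller whether it consumed the buffer.
 */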
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

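/*
 * Allocate the underlying buffer: from CMA when CONFIG_DMA_CMA is
 * enabled and the caller may sleep, otherwise through swiotlb. ZONE_DMA
 * is forced when the device's coherent mask does not reach above 32
 * bits. The buffer is returned zeroed, via its cacheable linear-map
 * address.
 */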
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

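/*
 * Release a buffer obtained from __dma_alloc_coherent(): try CMA first
 * and fall back to swiotlb if the pages did not come from the
 * contiguous area.
 */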
static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

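/*
 * .alloc hook of swiotlb_dma_ops. Non-coherent devices in atomic context
 * are served from the pre-mapped atomic pool. Otherwise the buffer comes
 * from __dma_alloc_coherent(): coherent devices use its cacheable
 * address directly, while for non-coherent devices the kernel alias is
 * flushed and a second, Normal Non-Cacheable mapping is created and
 * returned instead.
 */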
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 struct dma_attrs *attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);

	size = PAGE_ALIGN(size);

	if (!coherent && !(flags & __GFP_WAIT)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
				__get_dma_pgprot(attrs,
					__pgprot(PROT_NORMAL_NC), false),
				NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

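/*
 * .free hook: recover the linear-map address from the DMA handle,
 * release pool allocations or unmap the non-cacheable alias for
 * non-coherent devices, then free the underlying buffer.
 */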
static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

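/*
 * Streaming DMA (map/unmap of pages and scatterlists) is layered on the
 * swiotlb implementation. For non-coherent devices each operation also
 * performs the required cache maintenance (__dma_map_area() /
 * __dma_unmap_area()) on the memory the device actually accesses, i.e.
 * on the bounce buffer whenever swiotlb had to bounce.
 */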
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

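/*
 * The sync hooks mirror map/unmap for buffers that stay mapped:
 * maintenance runs after the swiotlb copy when the device is about to
 * access the buffer, and before it when the CPU regains ownership.
 */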
static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

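/*
 * Map a coherent buffer into a user vma with remap_pfn_range() once the
 * size checks pass; buffers from per-device coherent memory are handled
 * by dma_mmap_from_coherent() instead.
 */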
/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

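/*
 * .mmap hook: pick the right page protections for the device (see
 * __get_dma_pgprot()) before doing the common remap.
 */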
static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  struct dma_attrs *attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

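/*
 * Default DMA operations for arm64, installed as the global dma_ops in
 * arm64_dma_init() below.
 */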
static struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};

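/*
 * Create the pool backing __alloc_from_pool(): grab atomic_pool_size
 * bytes from CMA (if available) or from ZONE_DMA, zero and flush them,
 * remap them Normal Non-Cacheable and hand the region to a gen_pool
 * using an order-aligned first-fit allocator.
 */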
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_range(page_addr, page_addr + atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

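/*
 * Install the swiotlb-based DMA operations and set up the atomic pool
 * early in boot, before devices start probing.
 */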
static int __init arm64_dma_init(void)
{
	int ret;

	dma_ops = &swiotlb_dma_ops;

	ret = atomic_pool_init();

	return ret;
}
arch_initcall(arm64_dma_init);

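/*
 * Pre-allocate tracking entries for the DMA debugging API; this is a
 * no-op unless CONFIG_DMA_API_DEBUG is enabled.
 */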
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);