ARM: 8506/1: common: DMA-mapping: add DMA_ATTR_ALLOC_SINGLE_PAGES attribute
arch/arm/mm/dma-mapping.c
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "dma.h"
#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
                size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
                size_t, enum dma_data_direction);

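/*
 * Illustrative sketch (not part of this file): seen from a driver, the
 * ownership transitions described above map onto the streaming DMA API
 * roughly as below.  The function name, buffer and length are hypothetical.
 */
static int __maybe_unused mydev_receive(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma;

        /* CPU -> device: the map operation cleans the CPU caches */
        dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* ... hand 'dma' to the device and wait for the transfer ... */

        /* device -> CPU: the unmap invalidates, so the CPU sees fresh data */
        dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
        return 0;
}
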
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             struct dma_attrs *attrs)
{
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_cpu_to_dev(page, offset, size, dir);
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             struct dma_attrs *attrs)
{
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
                                      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        unsigned int offset = handle & (PAGE_SIZE - 1);
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
        __dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        unsigned int offset = handle & (PAGE_SIZE - 1);
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
        __dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
        .alloc                  = arm_dma_alloc,
        .free                   = arm_dma_free,
        .mmap                   = arm_dma_mmap,
        .get_sgtable            = arm_dma_get_sgtable,
        .map_page               = arm_dma_map_page,
        .unmap_page             = arm_dma_unmap_page,
        .map_sg                 = arm_dma_map_sg,
        .unmap_sg               = arm_dma_unmap_sg,
        .sync_single_for_cpu    = arm_dma_sync_single_for_cpu,
        .sync_single_for_device = arm_dma_sync_single_for_device,
        .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = arm_dma_sync_sg_for_device,
        .set_dma_mask           = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
                                  dma_addr_t handle, struct dma_attrs *attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
                 struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
        .alloc                  = arm_coherent_dma_alloc,
        .free                   = arm_coherent_dma_free,
        .mmap                   = arm_coherent_dma_mmap,
        .get_sgtable            = arm_dma_get_sgtable,
        .map_page               = arm_coherent_dma_map_page,
        .map_sg                 = arm_dma_map_sg,
        .set_dma_mask           = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
        unsigned long max_dma_pfn;

        /*
         * If the mask allows for more memory than we can address,
         * and we actually have that much memory, then we must
         * indicate that DMA to this device is not supported.
         */
        if (sizeof(mask) != sizeof(dma_addr_t) &&
            mask > (dma_addr_t)~0 &&
            dma_to_pfn(dev, ~0) < max_pfn - 1) {
                if (warn) {
                        dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
                                 mask);
                        dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
                }
                return 0;
        }

        max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

        /*
         * Translate the device's DMA mask to a PFN limit.  This
         * PFN number includes the page which we can DMA to.
         */
        if (dma_to_pfn(dev, mask) < max_dma_pfn) {
                if (warn)
                        dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
                                 mask,
                                 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
                                 max_dma_pfn + 1);
                return 0;
        }

        return 1;
}

static u64 get_coherent_dma_mask(struct device *dev)
{
        u64 mask = (u64)DMA_BIT_MASK(32);

        if (dev) {
                mask = dev->coherent_dma_mask;

                /*
                 * Sanity check the DMA mask - it must be non-zero, and
                 * must be able to be satisfied by a DMA allocation.
                 */
                if (mask == 0) {
                        dev_warn(dev, "coherent DMA mask is unset\n");
                        return 0;
                }

                if (!__dma_supported(dev, mask, true))
                        return 0;
        }

        return mask;
}

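/*
 * Illustrative sketch (not part of this file): the warnings above are aimed
 * at drivers that forget to negotiate their masks.  A driver would normally
 * do so in probe() and check the result; the 28-bit limit and function name
 * below are hypothetical.
 */
static int __maybe_unused mydev_set_masks(struct device *dev)
{
        int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(28));

        if (ret)
                dev_err(dev, "no usable DMA configuration\n");
        return ret;
}
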
static void __dma_clear_buffer(struct page *page, size_t size)
{
        /*
         * Ensure that the allocated pages are zeroed, and that any data
         * lurking in the kernel direct-mapped region is invalidated.
         */
        if (PageHighMem(page)) {
                phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
                phys_addr_t end = base + size;
                while (size > 0) {
                        void *ptr = kmap_atomic(page);
                        memset(ptr, 0, PAGE_SIZE);
                        dmac_flush_range(ptr, ptr + PAGE_SIZE);
                        kunmap_atomic(ptr);
                        page++;
                        size -= PAGE_SIZE;
                }
                outer_flush_range(base, end);
        } else {
                void *ptr = page_address(page);
                memset(ptr, 0, size);
                dmac_flush_range(ptr, ptr + size);
                outer_flush_range(__pa(ptr), __pa(ptr) + size);
        }
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
        unsigned long order = get_order(size);
        struct page *page, *p, *e;

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        /*
         * Now split the huge page and free the excess pages
         */
        split_page(page, order);
        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
                __free_page(p);

        __dma_clear_buffer(page, size);

        return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
        struct page *e = page + (size >> PAGE_SHIFT);

        while (page < e) {
                __free_page(page);
                page++;
        }
}

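/*
 * Illustrative sketch (not part of this file): the order arithmetic above
 * for a hypothetical 20 KiB request.  get_order() rounds up to order 3
 * (32 KiB, eight pages); split_page() makes them independent order-0 pages
 * and the trailing three are returned to the page allocator, so only five
 * pages stay allocated.
 */
static void __maybe_unused dma_buffer_order_example(struct device *dev)
{
        struct page *page = __dma_alloc_buffer(dev, PAGE_ALIGN(20 * SZ_1K),
                                               GFP_KERNEL);

        if (page)
                __dma_free_buffer(page, PAGE_ALIGN(20 * SZ_1K));
}
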
ab6494f0 292#ifdef CONFIG_MMU
a5e9d38b 293
e9da6e99 294static void *__alloc_from_contiguous(struct device *dev, size_t size,
9848e48f 295 pgprot_t prot, struct page **ret_page,
6e8266e3 296 const void *caller, bool want_vaddr);
99d1717d 297
e9da6e99
MS
298static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
299 pgprot_t prot, struct page **ret_page,
6e8266e3 300 const void *caller, bool want_vaddr);
99d1717d 301
e9da6e99
MS
302static void *
303__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
304 const void *caller)
99d1717d 305{
e9da6e99
MS
306 /*
307 * DMA allocation can be mapped to user space, so lets
308 * set VM_USERMAP flags too.
309 */
513510dd
LA
310 return dma_common_contiguous_remap(page, size,
311 VM_ARM_DMA_CONSISTENT | VM_USERMAP,
312 prot, caller);
99d1717d 313}
1da177e4 314
e9da6e99 315static void __dma_free_remap(void *cpu_addr, size_t size)
88c58f3b 316{
513510dd
LA
317 dma_common_free_remap(cpu_addr, size,
318 VM_ARM_DMA_CONSISTENT | VM_USERMAP);
88c58f3b 319}
88c58f3b 320
6e5267aa 321#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
36d0fd21 322static struct gen_pool *atomic_pool;
6e5267aa 323
36d0fd21 324static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
c7909509
MS
325
326static int __init early_coherent_pool(char *p)
327{
36d0fd21 328 atomic_pool_size = memparse(p, &p);
c7909509
MS
329 return 0;
330}
331early_param("coherent_pool", early_coherent_pool);
332
6e5267aa
MS
333void __init init_dma_coherent_pool_size(unsigned long size)
334{
335 /*
336 * Catch any attempt to set the pool size too late.
337 */
36d0fd21 338 BUG_ON(atomic_pool);
6e5267aa
MS
339
340 /*
341 * Set architecture specific coherent pool size only if
342 * it has not been changed by kernel command line parameter.
343 */
36d0fd21
LA
344 if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
345 atomic_pool_size = size;
346}
347
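/*
 * Illustrative sketch (not part of this file): a platform that needs a
 * larger atomic pool can raise the default before the pool is created,
 * typically from its machine_desc ->reserve() hook.  The machine name and
 * size below are hypothetical.
 */
static void __init __maybe_unused mymach_reserve(void)
{
        init_dma_coherent_pool_size(SZ_1M);
}
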
c7909509
MS
348/*
349 * Initialise the coherent pool for atomic allocations.
350 */
e9da6e99 351static int __init atomic_pool_init(void)
c7909509 352{
71b55663 353 pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
9d1400cf 354 gfp_t gfp = GFP_KERNEL | GFP_DMA;
c7909509
MS
355 struct page *page;
356 void *ptr;
c7909509 357
36d0fd21
LA
358 atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
359 if (!atomic_pool)
360 goto out;
6b3fe472 361
e464ef16 362 if (dev_get_cma_area(NULL))
36d0fd21 363 ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
6e8266e3 364 &page, atomic_pool_init, true);
e9da6e99 365 else
36d0fd21 366 ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
6e8266e3 367 &page, atomic_pool_init, true);
c7909509 368 if (ptr) {
36d0fd21
LA
369 int ret;
370
371 ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
372 page_to_phys(page),
373 atomic_pool_size, -1);
374 if (ret)
375 goto destroy_genpool;
376
377 gen_pool_set_algo(atomic_pool,
378 gen_pool_first_fit_order_align,
379 (void *)PAGE_SHIFT);
380 pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
381 atomic_pool_size / 1024);
c7909509
MS
382 return 0;
383 }
ec10665c 384
36d0fd21
LA
385destroy_genpool:
386 gen_pool_destroy(atomic_pool);
387 atomic_pool = NULL;
388out:
389 pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
390 atomic_pool_size / 1024);
c7909509
MS
391 return -ENOMEM;
392}
393/*
394 * CMA is activated by core_initcall, so we must be called after it.
395 */
e9da6e99 396postcore_initcall(atomic_pool_init);
c7909509
MS
397
398struct dma_contig_early_reserve {
399 phys_addr_t base;
400 unsigned long size;
401};
402
403static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
404
405static int dma_mmu_remap_num __initdata;
406
407void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
408{
409 dma_mmu_remap[dma_mmu_remap_num].base = base;
410 dma_mmu_remap[dma_mmu_remap_num].size = size;
411 dma_mmu_remap_num++;
412}
413
414void __init dma_contiguous_remap(void)
415{
416 int i;
417 for (i = 0; i < dma_mmu_remap_num; i++) {
418 phys_addr_t start = dma_mmu_remap[i].base;
419 phys_addr_t end = start + dma_mmu_remap[i].size;
420 struct map_desc map;
421 unsigned long addr;
422
423 if (end > arm_lowmem_limit)
424 end = arm_lowmem_limit;
425 if (start >= end)
39f78e70 426 continue;
c7909509
MS
427
428 map.pfn = __phys_to_pfn(start);
429 map.virtual = __phys_to_virt(start);
430 map.length = end - start;
431 map.type = MT_MEMORY_DMA_READY;
432
433 /*
6b076991
RK
434 * Clear previous low-memory mapping to ensure that the
435 * TLB does not see any conflicting entries, then flush
436 * the TLB of the old entries before creating new mappings.
437 *
438 * This ensures that any speculatively loaded TLB entries
439 * (even though they may be rare) can not cause any problems,
440 * and ensures that this code is architecturally compliant.
c7909509
MS
441 */
442 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
61f6c7a4 443 addr += PMD_SIZE)
c7909509
MS
444 pmd_clear(pmd_off_k(addr));
445
6b076991
RK
446 flush_tlb_kernel_range(__phys_to_virt(start),
447 __phys_to_virt(end));
448
c7909509
MS
449 iotable_init(&map, 1);
450 }
451}
452
c7909509
MS
453static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
454 void *data)
455{
456 struct page *page = virt_to_page(addr);
457 pgprot_t prot = *(pgprot_t *)data;
458
459 set_pte_ext(pte, mk_pte(page, prot), 0);
460 return 0;
461}
462
463static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
464{
465 unsigned long start = (unsigned long) page_address(page);
466 unsigned end = start + size;
467
468 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
c7909509
MS
469 flush_tlb_kernel_range(start, end);
470}
471
472static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
473 pgprot_t prot, struct page **ret_page,
6e8266e3 474 const void *caller, bool want_vaddr)
c7909509
MS
475{
476 struct page *page;
6e8266e3 477 void *ptr = NULL;
c7909509
MS
478 page = __dma_alloc_buffer(dev, size, gfp);
479 if (!page)
480 return NULL;
6e8266e3
CC
481 if (!want_vaddr)
482 goto out;
c7909509
MS
483
484 ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
485 if (!ptr) {
486 __dma_free_buffer(page, size);
487 return NULL;
488 }
489
6e8266e3 490 out:
c7909509
MS
491 *ret_page = page;
492 return ptr;
493}
494
e9da6e99 495static void *__alloc_from_pool(size_t size, struct page **ret_page)
c7909509 496{
36d0fd21 497 unsigned long val;
e9da6e99 498 void *ptr = NULL;
c7909509 499
36d0fd21 500 if (!atomic_pool) {
e9da6e99 501 WARN(1, "coherent pool not initialised!\n");
c7909509
MS
502 return NULL;
503 }
504
36d0fd21
LA
505 val = gen_pool_alloc(atomic_pool, size);
506 if (val) {
507 phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
508
509 *ret_page = phys_to_page(phys);
510 ptr = (void *)val;
c7909509 511 }
e9da6e99
MS
512
513 return ptr;
c7909509
MS
514}
515
21d0a759
HD
516static bool __in_atomic_pool(void *start, size_t size)
517{
36d0fd21 518 return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
21d0a759
HD
519}
520
e9da6e99 521static int __free_from_pool(void *start, size_t size)
c7909509 522{
21d0a759 523 if (!__in_atomic_pool(start, size))
c7909509
MS
524 return 0;
525
36d0fd21 526 gen_pool_free(atomic_pool, (unsigned long)start, size);
e9da6e99 527
c7909509
MS
528 return 1;
529}
530
531static void *__alloc_from_contiguous(struct device *dev, size_t size,
9848e48f 532 pgprot_t prot, struct page **ret_page,
6e8266e3 533 const void *caller, bool want_vaddr)
c7909509
MS
534{
535 unsigned long order = get_order(size);
536 size_t count = size >> PAGE_SHIFT;
537 struct page *page;
6e8266e3 538 void *ptr = NULL;
c7909509
MS
539
540 page = dma_alloc_from_contiguous(dev, count, order);
541 if (!page)
542 return NULL;
543
544 __dma_clear_buffer(page, size);
c7909509 545
6e8266e3
CC
546 if (!want_vaddr)
547 goto out;
548
9848e48f
MS
549 if (PageHighMem(page)) {
550 ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
551 if (!ptr) {
552 dma_release_from_contiguous(dev, page, count);
553 return NULL;
554 }
555 } else {
556 __dma_remap(page, size, prot);
557 ptr = page_address(page);
558 }
6e8266e3
CC
559
560 out:
c7909509 561 *ret_page = page;
9848e48f 562 return ptr;
c7909509
MS
563}
564
565static void __free_from_contiguous(struct device *dev, struct page *page,
6e8266e3 566 void *cpu_addr, size_t size, bool want_vaddr)
c7909509 567{
6e8266e3
CC
568 if (want_vaddr) {
569 if (PageHighMem(page))
570 __dma_free_remap(cpu_addr, size);
571 else
572 __dma_remap(page, size, PAGE_KERNEL);
573 }
c7909509
MS
574 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
575}
576
f99d6034
MS
577static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
578{
579 prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
580 pgprot_writecombine(prot) :
581 pgprot_dmacoherent(prot);
582 return prot;
583}
584
c7909509
MS
585#define nommu() 0
586
ab6494f0 587#else /* !CONFIG_MMU */
695ae0af 588
c7909509
MS
589#define nommu() 1
590
6e8266e3
CC
591#define __get_dma_pgprot(attrs, prot) __pgprot(0)
592#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
e9da6e99 593#define __alloc_from_pool(size, ret_page) NULL
6e8266e3 594#define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL
c7909509 595#define __free_from_pool(cpu_addr, size) 0
6e8266e3 596#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
c7909509 597#define __dma_free_remap(cpu_addr, size) do { } while (0)
31ebf944
RK
598
599#endif /* CONFIG_MMU */
600
c7909509
MS
601static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
602 struct page **ret_page)
ab6494f0 603{
c7909509
MS
604 struct page *page;
605 page = __dma_alloc_buffer(dev, size, gfp);
606 if (!page)
607 return NULL;
608
609 *ret_page = page;
610 return page_address(page);
611}
612
613
614
615static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
6e8266e3
CC
616 gfp_t gfp, pgprot_t prot, bool is_coherent,
617 struct dma_attrs *attrs, const void *caller)
c7909509
MS
618{
619 u64 mask = get_coherent_dma_mask(dev);
3dd7ea92 620 struct page *page = NULL;
31ebf944 621 void *addr;
6e8266e3 622 bool want_vaddr;
ab6494f0 623
c7909509
MS
624#ifdef CONFIG_DMA_API_DEBUG
625 u64 limit = (mask + 1) & ~mask;
626 if (limit && size >= limit) {
627 dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
628 size, mask);
629 return NULL;
630 }
631#endif
632
633 if (!mask)
634 return NULL;
635
636 if (mask < 0xffffffffULL)
637 gfp |= GFP_DMA;
638
ea2e7057
SB
639 /*
640 * Following is a work-around (a.k.a. hack) to prevent pages
641 * with __GFP_COMP being passed to split_page() which cannot
642 * handle them. The real problem is that this flag probably
643 * should be 0 on ARM as it is not supported on this
644 * platform; see CONFIG_HUGETLBFS.
645 */
646 gfp &= ~(__GFP_COMP);
647
553ac788 648 *handle = DMA_ERROR_CODE;
04da5694 649 size = PAGE_ALIGN(size);
6e8266e3 650 want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
ab6494f0 651
21caf3a7
LN
652 if (nommu())
653 addr = __alloc_simple_buffer(dev, size, gfp, &page);
d0164adc 654 else if (dev_get_cma_area(dev) && (gfp & __GFP_DIRECT_RECLAIM))
21caf3a7
LN
655 addr = __alloc_from_contiguous(dev, size, prot, &page,
656 caller, want_vaddr);
657 else if (is_coherent)
c7909509 658 addr = __alloc_simple_buffer(dev, size, gfp, &page);
d0164adc 659 else if (!gfpflags_allow_blocking(gfp))
e9da6e99 660 addr = __alloc_from_pool(size, &page);
31ebf944 661 else
21caf3a7
LN
662 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
663 caller, want_vaddr);
695ae0af 664
6e8266e3 665 if (page)
9eedd963 666 *handle = pfn_to_dma(dev, page_to_pfn(page));
695ae0af 667
6e8266e3 668 return want_vaddr ? addr : page;
31ebf944 669}
1da177e4
LT
670
671/*
672 * Allocate DMA-coherent memory space and return both the kernel remapped
673 * virtual and bus address for that space.
674 */
f99d6034
MS
675void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
676 gfp_t gfp, struct dma_attrs *attrs)
1da177e4 677{
0ea1ec71 678 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
1fe53268 679
dd37e940 680 return __dma_alloc(dev, size, handle, gfp, prot, false,
6e8266e3 681 attrs, __builtin_return_address(0));
dd37e940
RH
682}
683
684static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
685 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
686{
21caf3a7 687 return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
6e8266e3 688 attrs, __builtin_return_address(0));
1da177e4 689}
1da177e4 690
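/*
 * Illustrative sketch (not part of this file): the usual driver-side view of
 * the allocator above, reached through the generic coherent DMA API.  The
 * function name and descriptor-ring size are hypothetical.
 */
static void __maybe_unused mydev_ring_example(struct device *dev)
{
        dma_addr_t ring_dma;
        void *ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);

        if (!ring)
                return;

        /* ... program the device with 'ring_dma', access 'ring' from the CPU ... */

        dma_free_coherent(dev, SZ_4K, ring, ring_dma);
}
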
55af8a91 691static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
f99d6034
MS
692 void *cpu_addr, dma_addr_t dma_addr, size_t size,
693 struct dma_attrs *attrs)
1da177e4 694{
ab6494f0
CM
695 int ret = -ENXIO;
696#ifdef CONFIG_MMU
50262a4b
MS
697 unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
698 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
c7909509 699 unsigned long pfn = dma_to_pfn(dev, dma_addr);
50262a4b
MS
700 unsigned long off = vma->vm_pgoff;
701
47142f07
MS
702 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
703 return ret;
704
50262a4b
MS
705 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
706 ret = remap_pfn_range(vma, vma->vm_start,
707 pfn + off,
708 vma->vm_end - vma->vm_start,
709 vma->vm_page_prot);
710 }
ab6494f0 711#endif /* CONFIG_MMU */
1da177e4
LT
712
713 return ret;
714}
715
55af8a91
ML
716/*
717 * Create userspace mapping for the DMA-coherent memory.
718 */
719static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
720 void *cpu_addr, dma_addr_t dma_addr, size_t size,
721 struct dma_attrs *attrs)
722{
723 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
724}
725
726int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
727 void *cpu_addr, dma_addr_t dma_addr, size_t size,
728 struct dma_attrs *attrs)
729{
730#ifdef CONFIG_MMU
731 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
732#endif /* CONFIG_MMU */
733 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
734}
735
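/*
 * Illustrative sketch (not part of this file): a driver exposes such a
 * coherent buffer to user space from its mmap() file operation via the
 * generic wrapper, which lands in arm_dma_mmap() below.  The helper name
 * is hypothetical.
 */
static int __maybe_unused mydev_mmap_buffer(struct device *dev,
                struct vm_area_struct *vma, void *cpu_addr,
                dma_addr_t dma_handle, size_t size)
{
        return dma_mmap_coherent(dev, vma, cpu_addr, dma_handle, size);
}
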
1da177e4 736/*
c7909509 737 * Free a buffer as defined by the above mapping.
1da177e4 738 */
dd37e940
RH
739static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
740 dma_addr_t handle, struct dma_attrs *attrs,
741 bool is_coherent)
1da177e4 742{
c7909509 743 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
6e8266e3 744 bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
5edf71ae 745
3e82d012
RK
746 size = PAGE_ALIGN(size);
747
21caf3a7 748 if (nommu()) {
c7909509 749 __dma_free_buffer(page, size);
21caf3a7 750 } else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
d9e0d149 751 return;
e464ef16 752 } else if (!dev_get_cma_area(dev)) {
21caf3a7 753 if (want_vaddr && !is_coherent)
6e8266e3 754 __dma_free_remap(cpu_addr, size);
c7909509
MS
755 __dma_free_buffer(page, size);
756 } else {
c7909509
MS
757 /*
758 * Non-atomic allocations cannot be freed with IRQs disabled
759 */
760 WARN_ON(irqs_disabled());
6e8266e3 761 __free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
c7909509 762 }
1da177e4 763}
afd1a321 764
dd37e940
RH
765void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
766 dma_addr_t handle, struct dma_attrs *attrs)
767{
768 __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
769}
770
771static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
772 dma_addr_t handle, struct dma_attrs *attrs)
773{
774 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
775}
776
dc2832e1
MS
777int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
778 void *cpu_addr, dma_addr_t handle, size_t size,
779 struct dma_attrs *attrs)
780{
781 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
782 int ret;
783
784 ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
785 if (unlikely(ret))
786 return ret;
787
788 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
789 return 0;
790}
791
4ea0d737 792static void dma_cache_maint_page(struct page *page, unsigned long offset,
a9c9147e
RK
793 size_t size, enum dma_data_direction dir,
794 void (*op)(const void *, size_t, int))
43377453 795{
15653371
RK
796 unsigned long pfn;
797 size_t left = size;
798
799 pfn = page_to_pfn(page) + offset / PAGE_SIZE;
800 offset %= PAGE_SIZE;
801
43377453
NP
802 /*
803 * A single sg entry may refer to multiple physically contiguous
804 * pages. But we still need to process highmem pages individually.
805 * If highmem is not configured then the bulk of this loop gets
806 * optimized out.
807 */
43377453
NP
808 do {
809 size_t len = left;
93f1d629
RK
810 void *vaddr;
811
15653371
RK
812 page = pfn_to_page(pfn);
813
93f1d629 814 if (PageHighMem(page)) {
15653371 815 if (len + offset > PAGE_SIZE)
93f1d629 816 len = PAGE_SIZE - offset;
dd0f67f4
JK
817
818 if (cache_is_vipt_nonaliasing()) {
39af22a7 819 vaddr = kmap_atomic(page);
7e5a69e8 820 op(vaddr + offset, len, dir);
39af22a7 821 kunmap_atomic(vaddr);
dd0f67f4
JK
822 } else {
823 vaddr = kmap_high_get(page);
824 if (vaddr) {
825 op(vaddr + offset, len, dir);
826 kunmap_high(page);
827 }
43377453 828 }
93f1d629
RK
829 } else {
830 vaddr = page_address(page) + offset;
a9c9147e 831 op(vaddr, len, dir);
43377453 832 }
43377453 833 offset = 0;
15653371 834 pfn++;
43377453
NP
835 left -= len;
836 } while (left);
837}
4ea0d737 838
51fde349
MS
839/*
840 * Make an area consistent for devices.
841 * Note: Drivers should NOT use this function directly, as it will break
842 * platforms with CONFIG_DMABOUNCE.
843 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
844 */
845static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
4ea0d737
RK
846 size_t size, enum dma_data_direction dir)
847{
2161c248 848 phys_addr_t paddr;
65af191a 849
a9c9147e 850 dma_cache_maint_page(page, off, size, dir, dmac_map_area);
65af191a
RK
851
852 paddr = page_to_phys(page) + off;
2ffe2da3
RK
853 if (dir == DMA_FROM_DEVICE) {
854 outer_inv_range(paddr, paddr + size);
855 } else {
856 outer_clean_range(paddr, paddr + size);
857 }
858 /* FIXME: non-speculating: flush on bidirectional mappings? */
4ea0d737 859}
4ea0d737 860
51fde349 861static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
4ea0d737
RK
862 size_t size, enum dma_data_direction dir)
863{
2161c248 864 phys_addr_t paddr = page_to_phys(page) + off;
2ffe2da3
RK
865
866 /* FIXME: non-speculating: not required */
deace4a6
RK
867 /* in any case, don't bother invalidating if DMA to device */
868 if (dir != DMA_TO_DEVICE) {
2ffe2da3
RK
869 outer_inv_range(paddr, paddr + size);
870
deace4a6
RK
871 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
872 }
c0177800
CM
873
874 /*
b2a234ed 875 * Mark the D-cache clean for these pages to avoid extra flushing.
c0177800 876 */
b2a234ed
ML
877 if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
878 unsigned long pfn;
879 size_t left = size;
880
881 pfn = page_to_pfn(page) + off / PAGE_SIZE;
882 off %= PAGE_SIZE;
883 if (off) {
884 pfn++;
885 left -= PAGE_SIZE - off;
886 }
887 while (left >= PAGE_SIZE) {
888 page = pfn_to_page(pfn++);
889 set_bit(PG_dcache_clean, &page->flags);
890 left -= PAGE_SIZE;
891 }
892 }
4ea0d737 893}
43377453 894
afd1a321 895/**
2a550e73 896 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
afd1a321
RK
897 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
898 * @sg: list of buffers
899 * @nents: number of buffers to map
900 * @dir: DMA transfer direction
901 *
902 * Map a set of buffers described by scatterlist in streaming mode for DMA.
903 * This is the scatter-gather version of the dma_map_single interface.
904 * Here the scatter gather list elements are each tagged with the
905 * appropriate dma address and length. They are obtained via
906 * sg_dma_{address,length}.
907 *
908 * Device ownership issues as mentioned for dma_map_single are the same
909 * here.
910 */
2dc6a016
MS
911int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
912 enum dma_data_direction dir, struct dma_attrs *attrs)
afd1a321 913{
2a550e73 914 struct dma_map_ops *ops = get_dma_ops(dev);
afd1a321 915 struct scatterlist *s;
01135d92 916 int i, j;
afd1a321
RK
917
918 for_each_sg(sg, s, nents, i) {
4ce63fcd
MS
919#ifdef CONFIG_NEED_SG_DMA_LENGTH
920 s->dma_length = s->length;
921#endif
2a550e73
MS
922 s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
923 s->length, dir, attrs);
01135d92
RK
924 if (dma_mapping_error(dev, s->dma_address))
925 goto bad_mapping;
afd1a321 926 }
afd1a321 927 return nents;
01135d92
RK
928
929 bad_mapping:
930 for_each_sg(sg, s, i, j)
2a550e73 931 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
01135d92 932 return 0;
afd1a321 933}
934
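/*
 * Illustrative sketch (not part of this file): typical use of the
 * scatter-gather mapping above.  The sg list is assumed to have been filled
 * in by the caller (e.g. via sg_init_table()/sg_set_page()); the function
 * name is hypothetical.
 */
static int __maybe_unused mydev_map_sglist(struct device *dev,
                struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int i, count;

        count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
        if (!count)
                return -ENOMEM;

        for_each_sg(sgl, sg, count, i) {
                /* program one hardware descriptor per mapped segment */
                pr_debug("seg %d: %pad + %u\n", i,
                         &sg_dma_address(sg), sg_dma_len(sg));
        }

        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
}
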
935/**
2a550e73 936 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
afd1a321
RK
937 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
938 * @sg: list of buffers
0adfca6f 939 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
afd1a321
RK
940 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
941 *
942 * Unmap a set of streaming mode DMA translations. Again, CPU access
943 * rules concerning calls here are the same as for dma_unmap_single().
944 */
2dc6a016
MS
945void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
946 enum dma_data_direction dir, struct dma_attrs *attrs)
afd1a321 947{
2a550e73 948 struct dma_map_ops *ops = get_dma_ops(dev);
01135d92 949 struct scatterlist *s;
01135d92 950
01135d92 951 int i;
24056f52 952
01135d92 953 for_each_sg(sg, s, nents, i)
2a550e73 954 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
afd1a321 955}
afd1a321
RK
956
957/**
2a550e73 958 * arm_dma_sync_sg_for_cpu
afd1a321
RK
959 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
960 * @sg: list of buffers
961 * @nents: number of buffers to map (returned from dma_map_sg)
962 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
963 */
2dc6a016 964void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
afd1a321
RK
965 int nents, enum dma_data_direction dir)
966{
2a550e73 967 struct dma_map_ops *ops = get_dma_ops(dev);
afd1a321
RK
968 struct scatterlist *s;
969 int i;
970
2a550e73
MS
971 for_each_sg(sg, s, nents, i)
972 ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
973 dir);
afd1a321 974}
afd1a321
RK
975
976/**
2a550e73 977 * arm_dma_sync_sg_for_device
afd1a321
RK
978 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
979 * @sg: list of buffers
980 * @nents: number of buffers to map (returned from dma_map_sg)
981 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
982 */
2dc6a016 983void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
afd1a321
RK
984 int nents, enum dma_data_direction dir)
985{
2a550e73 986 struct dma_map_ops *ops = get_dma_ops(dev);
afd1a321
RK
987 struct scatterlist *s;
988 int i;
989
2a550e73
MS
990 for_each_sg(sg, s, nents, i)
991 ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
992 dir);
afd1a321 993}
24056f52 994
022ae537
RK
995/*
996 * Return whether the given device DMA address mask can be supported
997 * properly. For example, if your device can only drive the low 24-bits
998 * during bus mastering, then you would pass 0x00ffffff as the mask
999 * to this function.
1000 */
1001int dma_supported(struct device *dev, u64 mask)
1002{
9f28cde0 1003 return __dma_supported(dev, mask, false);
022ae537
RK
1004}
1005EXPORT_SYMBOL(dma_supported);
1006
87b54e78 1007int arm_dma_set_mask(struct device *dev, u64 dma_mask)
022ae537
RK
1008{
1009 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
1010 return -EIO;
1011
022ae537 1012 *dev->dma_mask = dma_mask;
022ae537
RK
1013
1014 return 0;
1015}
022ae537 1016
24056f52
RK
1017#define PREALLOC_DMA_DEBUG_ENTRIES 4096
1018
1019static int __init dma_debug_do_init(void)
1020{
1021 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
1022 return 0;
1023}
1024fs_initcall(dma_debug_do_init);
4ce63fcd
MS
1025
1026#ifdef CONFIG_ARM_DMA_USE_IOMMU
1027
1028/* IOMMU */
1029
4d852ef8
AH
1030static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
1031
4ce63fcd
MS
1032static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
1033 size_t size)
1034{
1035 unsigned int order = get_order(size);
1036 unsigned int align = 0;
1037 unsigned int count, start;
006f841d 1038 size_t mapping_size = mapping->bits << PAGE_SHIFT;
4ce63fcd 1039 unsigned long flags;
4d852ef8
AH
1040 dma_addr_t iova;
1041 int i;
4ce63fcd 1042
60460abf
SWK
1043 if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
1044 order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
1045
68efd7d2
MS
1046 count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1047 align = (1 << order) - 1;
4ce63fcd
MS
1048
1049 spin_lock_irqsave(&mapping->lock, flags);
4d852ef8
AH
1050 for (i = 0; i < mapping->nr_bitmaps; i++) {
1051 start = bitmap_find_next_zero_area(mapping->bitmaps[i],
1052 mapping->bits, 0, count, align);
1053
1054 if (start > mapping->bits)
1055 continue;
1056
1057 bitmap_set(mapping->bitmaps[i], start, count);
1058 break;
4ce63fcd
MS
1059 }
1060
4d852ef8
AH
1061 /*
1062 * No unused range found. Try to extend the existing mapping
1063 * and perform a second attempt to reserve an IO virtual
1064 * address range of size bytes.
1065 */
1066 if (i == mapping->nr_bitmaps) {
1067 if (extend_iommu_mapping(mapping)) {
1068 spin_unlock_irqrestore(&mapping->lock, flags);
1069 return DMA_ERROR_CODE;
1070 }
1071
1072 start = bitmap_find_next_zero_area(mapping->bitmaps[i],
1073 mapping->bits, 0, count, align);
1074
1075 if (start > mapping->bits) {
1076 spin_unlock_irqrestore(&mapping->lock, flags);
1077 return DMA_ERROR_CODE;
1078 }
1079
1080 bitmap_set(mapping->bitmaps[i], start, count);
1081 }
4ce63fcd
MS
1082 spin_unlock_irqrestore(&mapping->lock, flags);
1083
006f841d 1084 iova = mapping->base + (mapping_size * i);
68efd7d2 1085 iova += start << PAGE_SHIFT;
4d852ef8
AH
1086
1087 return iova;
4ce63fcd
MS
1088}
1089
1090static inline void __free_iova(struct dma_iommu_mapping *mapping,
1091 dma_addr_t addr, size_t size)
1092{
4d852ef8 1093 unsigned int start, count;
006f841d 1094 size_t mapping_size = mapping->bits << PAGE_SHIFT;
4ce63fcd 1095 unsigned long flags;
4d852ef8
AH
1096 dma_addr_t bitmap_base;
1097 u32 bitmap_index;
1098
1099 if (!size)
1100 return;
1101
006f841d 1102 bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
4d852ef8
AH
1103 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
1104
006f841d 1105 bitmap_base = mapping->base + mapping_size * bitmap_index;
4d852ef8 1106
68efd7d2 1107 start = (addr - bitmap_base) >> PAGE_SHIFT;
4d852ef8 1108
006f841d 1109 if (addr + size > bitmap_base + mapping_size) {
4d852ef8
AH
1110 /*
1111 * The address range to be freed reaches into the iova
1112 * range of the next bitmap. This should not happen as
1113 * we don't allow this in __alloc_iova (at the
1114 * moment).
1115 */
1116 BUG();
1117 } else
68efd7d2 1118 count = size >> PAGE_SHIFT;
4ce63fcd
MS
1119
1120 spin_lock_irqsave(&mapping->lock, flags);
4d852ef8 1121 bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
4ce63fcd
MS
1122 spin_unlock_irqrestore(&mapping->lock, flags);
1123}
1124
33298ef6
DA
1125/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
1126static const int iommu_order_array[] = { 9, 8, 4, 0 };
1127
549a17e4
MS
1128static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
1129 gfp_t gfp, struct dma_attrs *attrs)
4ce63fcd
MS
1130{
1131 struct page **pages;
1132 int count = size >> PAGE_SHIFT;
1133 int array_size = count * sizeof(struct page *);
1134 int i = 0;
33298ef6 1135 int order_idx = 0;
4ce63fcd
MS
1136
1137 if (array_size <= PAGE_SIZE)
23be7fda 1138 pages = kzalloc(array_size, GFP_KERNEL);
4ce63fcd
MS
1139 else
1140 pages = vzalloc(array_size);
1141 if (!pages)
1142 return NULL;
1143
549a17e4
MS
1144 if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
1145 {
1146 unsigned long order = get_order(size);
1147 struct page *page;
1148
1149 page = dma_alloc_from_contiguous(dev, count, order);
1150 if (!page)
1151 goto error;
1152
1153 __dma_clear_buffer(page, size);
1154
1155 for (i = 0; i < count; i++)
1156 pages[i] = page + i;
1157
1158 return pages;
1159 }
1160
f8669bef
MS
1161 /*
1162 * IOMMU can map any pages, so himem can also be used here
1163 */
1164 gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
1165
4ce63fcd 1166 while (count) {
49f28aa6
TF
1167 int j, order;
1168
33298ef6
DA
1169 order = iommu_order_array[order_idx];
1170
1171 /* Drop down when we get small */
1172 if (__fls(count) < order) {
1173 order_idx++;
1174 continue;
49f28aa6 1175 }
4ce63fcd 1176
33298ef6
DA
1177 if (order) {
1178 /* See if it's easy to allocate a high-order chunk */
1179 pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
1180
1181 /* Go down a notch at first sign of pressure */
1182 if (!pages[i]) {
1183 order_idx++;
1184 continue;
1185 }
1186 } else {
49f28aa6
TF
1187 pages[i] = alloc_pages(gfp, 0);
1188 if (!pages[i])
1189 goto error;
1190 }
4ce63fcd 1191
5a796eeb 1192 if (order) {
4ce63fcd 1193 split_page(pages[i], order);
5a796eeb
HD
1194 j = 1 << order;
1195 while (--j)
1196 pages[i + j] = pages[i] + j;
1197 }
4ce63fcd
MS
1198
1199 __dma_clear_buffer(pages[i], PAGE_SIZE << order);
1200 i += 1 << order;
1201 count -= 1 << order;
1202 }
1203
1204 return pages;
1205error:
9fa8af91 1206 while (i--)
4ce63fcd
MS
1207 if (pages[i])
1208 __free_pages(pages[i], 0);
1d5cfdb0 1209 kvfree(pages);
1210 return NULL;
1211}
1212
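/*
 * Illustrative sketch (not part of this file): a caller that maps large
 * buffers through an IOMMU and gains nothing from higher-order chunks can
 * ask for order-0 pages only with the DMA_ATTR_ALLOC_SINGLE_PAGES attribute
 * named in the commit title, assuming a tree where that attribute exists.
 * The function name, device and size are hypothetical.
 */
static void __maybe_unused mydev_alloc_iommu_buffer(struct device *dev)
{
        DEFINE_DMA_ATTRS(attrs);
        dma_addr_t iova;
        void *vaddr;

        dma_set_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, &attrs);
        vaddr = dma_alloc_attrs(dev, SZ_16M, &iova, GFP_KERNEL, &attrs);
        if (!vaddr)
                return;

        /* ... use the buffer ... */

        dma_free_attrs(dev, SZ_16M, vaddr, iova, &attrs);
}
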
549a17e4
MS
1213static int __iommu_free_buffer(struct device *dev, struct page **pages,
1214 size_t size, struct dma_attrs *attrs)
4ce63fcd
MS
1215{
1216 int count = size >> PAGE_SHIFT;
4ce63fcd 1217 int i;
549a17e4
MS
1218
1219 if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
1220 dma_release_from_contiguous(dev, pages[0], count);
1221 } else {
1222 for (i = 0; i < count; i++)
1223 if (pages[i])
1224 __free_pages(pages[i], 0);
1225 }
1226
1d5cfdb0 1227 kvfree(pages);
4ce63fcd
MS
1228 return 0;
1229}
1230
1231/*
1232 * Create a CPU mapping for a specified pages
1233 */
1234static void *
e9da6e99
MS
1235__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1236 const void *caller)
4ce63fcd 1237{
513510dd
LA
1238 return dma_common_pages_remap(pages, size,
1239 VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
4ce63fcd
MS
1240}
1241
1242/*
1243 * Create a mapping in device IO address space for specified pages
1244 */
1245static dma_addr_t
1246__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
1247{
89cfdb19 1248 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
4ce63fcd
MS
1249 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1250 dma_addr_t dma_addr, iova;
90cde558 1251 int i;
4ce63fcd
MS
1252
1253 dma_addr = __alloc_iova(mapping, size);
1254 if (dma_addr == DMA_ERROR_CODE)
1255 return dma_addr;
1256
1257 iova = dma_addr;
1258 for (i = 0; i < count; ) {
90cde558
AP
1259 int ret;
1260
4ce63fcd
MS
1261 unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
1262 phys_addr_t phys = page_to_phys(pages[i]);
1263 unsigned int len, j;
1264
1265 for (j = i + 1; j < count; j++, next_pfn++)
1266 if (page_to_pfn(pages[j]) != next_pfn)
1267 break;
1268
1269 len = (j - i) << PAGE_SHIFT;
c9b24996
AH
1270 ret = iommu_map(mapping->domain, iova, phys, len,
1271 IOMMU_READ|IOMMU_WRITE);
4ce63fcd
MS
1272 if (ret < 0)
1273 goto fail;
1274 iova += len;
1275 i = j;
1276 }
1277 return dma_addr;
1278fail:
1279 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
1280 __free_iova(mapping, dma_addr, size);
1281 return DMA_ERROR_CODE;
1282}
1283
1284static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1285{
89cfdb19 1286 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
4ce63fcd
MS
1287
1288 /*
1289 * add optional in-page offset from iova to size and align
1290 * result to page size
1291 */
1292 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1293 iova &= PAGE_MASK;
1294
1295 iommu_unmap(mapping->domain, iova, size);
1296 __free_iova(mapping, iova, size);
1297 return 0;
1298}
1299
665bad7b
HD
1300static struct page **__atomic_get_pages(void *addr)
1301{
36d0fd21
LA
1302 struct page *page;
1303 phys_addr_t phys;
1304
1305 phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
1306 page = phys_to_page(phys);
665bad7b 1307
36d0fd21 1308 return (struct page **)page;
665bad7b
HD
1309}
1310
955c757e 1311static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
e9da6e99
MS
1312{
1313 struct vm_struct *area;
1314
665bad7b
HD
1315 if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1316 return __atomic_get_pages(cpu_addr);
1317
955c757e
MS
1318 if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
1319 return cpu_addr;
1320
e9da6e99
MS
1321 area = find_vm_area(cpu_addr);
1322 if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
1323 return area->pages;
1324 return NULL;
1325}
1326
479ed93a
HD
1327static void *__iommu_alloc_atomic(struct device *dev, size_t size,
1328 dma_addr_t *handle)
1329{
1330 struct page *page;
1331 void *addr;
1332
1333 addr = __alloc_from_pool(size, &page);
1334 if (!addr)
1335 return NULL;
1336
1337 *handle = __iommu_create_mapping(dev, &page, size);
1338 if (*handle == DMA_ERROR_CODE)
1339 goto err_mapping;
1340
1341 return addr;
1342
1343err_mapping:
1344 __free_from_pool(addr, size);
1345 return NULL;
1346}
1347
d5898291 1348static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
479ed93a
HD
1349 dma_addr_t handle, size_t size)
1350{
1351 __iommu_remove_mapping(dev, handle, size);
d5898291 1352 __free_from_pool(cpu_addr, size);
479ed93a
HD
1353}
1354
4ce63fcd
MS
1355static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1356 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
1357{
71b55663 1358 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
4ce63fcd
MS
1359 struct page **pages;
1360 void *addr = NULL;
1361
1362 *handle = DMA_ERROR_CODE;
1363 size = PAGE_ALIGN(size);
1364
d0164adc 1365 if (!gfpflags_allow_blocking(gfp))
479ed93a
HD
1366 return __iommu_alloc_atomic(dev, size, handle);
1367
5b91a98c
RZ
1368 /*
1369 * Following is a work-around (a.k.a. hack) to prevent pages
1370 * with __GFP_COMP being passed to split_page() which cannot
1371 * handle them. The real problem is that this flag probably
1372 * should be 0 on ARM as it is not supported on this
1373 * platform; see CONFIG_HUGETLBFS.
1374 */
1375 gfp &= ~(__GFP_COMP);
1376
549a17e4 1377 pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
4ce63fcd
MS
1378 if (!pages)
1379 return NULL;
1380
1381 *handle = __iommu_create_mapping(dev, pages, size);
1382 if (*handle == DMA_ERROR_CODE)
1383 goto err_buffer;
1384
955c757e
MS
1385 if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
1386 return pages;
1387
e9da6e99
MS
1388 addr = __iommu_alloc_remap(pages, size, gfp, prot,
1389 __builtin_return_address(0));
4ce63fcd
MS
1390 if (!addr)
1391 goto err_mapping;
1392
1393 return addr;
1394
1395err_mapping:
1396 __iommu_remove_mapping(dev, *handle, size);
1397err_buffer:
549a17e4 1398 __iommu_free_buffer(dev, pages, size, attrs);
4ce63fcd
MS
1399 return NULL;
1400}
1401
1402static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
1403 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1404 struct dma_attrs *attrs)
1405{
e9da6e99
MS
1406 unsigned long uaddr = vma->vm_start;
1407 unsigned long usize = vma->vm_end - vma->vm_start;
955c757e 1408 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
371f0f08
MS
1409 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1410 unsigned long off = vma->vm_pgoff;
4ce63fcd
MS
1411
1412 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
4ce63fcd 1413
e9da6e99
MS
1414 if (!pages)
1415 return -ENXIO;
4ce63fcd 1416
371f0f08
MS
1417 if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
1418 return -ENXIO;
1419
7e312103
MS
1420 pages += off;
1421
e9da6e99
MS
1422 do {
1423 int ret = vm_insert_page(vma, uaddr, *pages++);
1424 if (ret) {
1425 pr_err("Remapping memory failed: %d\n", ret);
1426 return ret;
1427 }
1428 uaddr += PAGE_SIZE;
1429 usize -= PAGE_SIZE;
1430 } while (usize > 0);
4ce63fcd 1431
4ce63fcd
MS
1432 return 0;
1433}
1434
1435/*
1436 * free a page as defined by the above mapping.
1437 * Must not be called with IRQs disabled.
1438 */
1439void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1440 dma_addr_t handle, struct dma_attrs *attrs)
1441{
836bfa0d 1442 struct page **pages;
4ce63fcd
MS
1443 size = PAGE_ALIGN(size);
1444
836bfa0d
YC
1445 if (__in_atomic_pool(cpu_addr, size)) {
1446 __iommu_free_atomic(dev, cpu_addr, handle, size);
e9da6e99 1447 return;
4ce63fcd 1448 }
e9da6e99 1449
836bfa0d
YC
1450 pages = __iommu_get_pages(cpu_addr, attrs);
1451 if (!pages) {
1452 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
479ed93a
HD
1453 return;
1454 }
1455
955c757e 1456 if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
513510dd
LA
1457 dma_common_free_remap(cpu_addr, size,
1458 VM_ARM_DMA_CONSISTENT | VM_USERMAP);
955c757e 1459 }
e9da6e99
MS
1460
1461 __iommu_remove_mapping(dev, handle, size);
549a17e4 1462 __iommu_free_buffer(dev, pages, size, attrs);
4ce63fcd
MS
1463}
1464
dc2832e1
MS
1465static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1466 void *cpu_addr, dma_addr_t dma_addr,
1467 size_t size, struct dma_attrs *attrs)
1468{
1469 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1470 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1471
1472 if (!pages)
1473 return -ENXIO;
1474
1475 return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1476 GFP_KERNEL);
4ce63fcd
MS
1477}
1478
c9b24996
AH
1479static int __dma_direction_to_prot(enum dma_data_direction dir)
1480{
1481 int prot;
1482
1483 switch (dir) {
1484 case DMA_BIDIRECTIONAL:
1485 prot = IOMMU_READ | IOMMU_WRITE;
1486 break;
1487 case DMA_TO_DEVICE:
1488 prot = IOMMU_READ;
1489 break;
1490 case DMA_FROM_DEVICE:
1491 prot = IOMMU_WRITE;
1492 break;
1493 default:
1494 prot = 0;
1495 }
1496
1497 return prot;
1498}
1499
4ce63fcd
MS
1500/*
1501 * Map a part of the scatter-gather list into contiguous io address space
1502 */
1503static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1504 size_t size, dma_addr_t *handle,
0fa478df
RH
1505 enum dma_data_direction dir, struct dma_attrs *attrs,
1506 bool is_coherent)
4ce63fcd 1507{
89cfdb19 1508 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
4ce63fcd
MS
1509 dma_addr_t iova, iova_base;
1510 int ret = 0;
1511 unsigned int count;
1512 struct scatterlist *s;
c9b24996 1513 int prot;
4ce63fcd
MS
1514
1515 size = PAGE_ALIGN(size);
1516 *handle = DMA_ERROR_CODE;
1517
1518 iova_base = iova = __alloc_iova(mapping, size);
1519 if (iova == DMA_ERROR_CODE)
1520 return -ENOMEM;
1521
1522 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
3e6110fd 1523 phys_addr_t phys = page_to_phys(sg_page(s));
4ce63fcd
MS
1524 unsigned int len = PAGE_ALIGN(s->offset + s->length);
1525
0fa478df
RH
1526 if (!is_coherent &&
1527 !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
4ce63fcd
MS
1528 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1529
c9b24996
AH
1530 prot = __dma_direction_to_prot(dir);
1531
1532 ret = iommu_map(mapping->domain, iova, phys, len, prot);
4ce63fcd
MS
1533 if (ret < 0)
1534 goto fail;
1535 count += len >> PAGE_SHIFT;
1536 iova += len;
1537 }
1538 *handle = iova_base;
1539
1540 return 0;
1541fail:
1542 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
1543 __free_iova(mapping, iova_base, size);
1544 return ret;
1545}
1546
0fa478df
RH
1547static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1548 enum dma_data_direction dir, struct dma_attrs *attrs,
1549 bool is_coherent)
4ce63fcd
MS
1550{
1551 struct scatterlist *s = sg, *dma = sg, *start = sg;
1552 int i, count = 0;
1553 unsigned int offset = s->offset;
1554 unsigned int size = s->offset + s->length;
1555 unsigned int max = dma_get_max_seg_size(dev);
1556
1557 for (i = 1; i < nents; i++) {
1558 s = sg_next(s);
1559
1560 s->dma_address = DMA_ERROR_CODE;
1561 s->dma_length = 0;
1562
1563 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
1564 if (__map_sg_chunk(dev, start, size, &dma->dma_address,
0fa478df 1565 dir, attrs, is_coherent) < 0)
4ce63fcd
MS
1566 goto bad_mapping;
1567
1568 dma->dma_address += offset;
1569 dma->dma_length = size - offset;
1570
1571 size = offset = s->offset;
1572 start = s;
1573 dma = sg_next(dma);
1574 count += 1;
1575 }
1576 size += s->length;
1577 }
0fa478df
RH
1578 if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
1579 is_coherent) < 0)
4ce63fcd
MS
1580 goto bad_mapping;
1581
1582 dma->dma_address += offset;
1583 dma->dma_length = size - offset;
1584
1585 return count+1;
1586
1587bad_mapping:
1588 for_each_sg(sg, s, count, i)
1589 __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
1590 return 0;
1591}
1592
1593/**
0fa478df 1594 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
4ce63fcd
MS
1595 * @dev: valid struct device pointer
1596 * @sg: list of buffers
0fa478df
RH
1597 * @nents: number of buffers to map
1598 * @dir: DMA transfer direction
4ce63fcd 1599 *
0fa478df
RH
1600 * Map a set of i/o coherent buffers described by scatterlist in streaming
1601 * mode for DMA. The scatter gather list elements are merged together (if
1602 * possible) and tagged with the appropriate dma address and length. They are
1603 * obtained via sg_dma_{address,length}.
4ce63fcd 1604 */
0fa478df
RH
1605int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1606 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1607{
1608 return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
1609}
1610
1611/**
1612 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1613 * @dev: valid struct device pointer
1614 * @sg: list of buffers
1615 * @nents: number of buffers to map
1616 * @dir: DMA transfer direction
1617 *
1618 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1619 * The scatter gather list elements are merged together (if possible) and
1620 * tagged with the appropriate dma address and length. They are obtained via
1621 * sg_dma_{address,length}.
1622 */
1623int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1624 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1625{
1626 return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
1627}
1628
1629static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1630 int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
1631 bool is_coherent)
4ce63fcd
MS
1632{
1633 struct scatterlist *s;
1634 int i;
1635
1636 for_each_sg(sg, s, nents, i) {
1637 if (sg_dma_len(s))
1638 __iommu_remove_mapping(dev, sg_dma_address(s),
1639 sg_dma_len(s));
0fa478df 1640 if (!is_coherent &&
97ef952a 1641 !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
4ce63fcd
MS
1642 __dma_page_dev_to_cpu(sg_page(s), s->offset,
1643 s->length, dir);
1644 }
1645}
1646
0fa478df
RH
1647/**
1648 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1649 * @dev: valid struct device pointer
1650 * @sg: list of buffers
1651 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1652 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1653 *
1654 * Unmap a set of streaming mode DMA translations. Again, CPU access
1655 * rules concerning calls here are the same as for dma_unmap_single().
1656 */
1657void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1658 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1659{
1660 __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
1661}
1662
1663/**
1664 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1665 * @dev: valid struct device pointer
1666 * @sg: list of buffers
1667 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1668 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1669 *
1670 * Unmap a set of streaming mode DMA translations. Again, CPU access
1671 * rules concerning calls here are the same as for dma_unmap_single().
1672 */
1673void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1674 enum dma_data_direction dir, struct dma_attrs *attrs)
1675{
1676 __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
1677}
1678
4ce63fcd
MS
1679/**
1680 * arm_iommu_sync_sg_for_cpu
1681 * @dev: valid struct device pointer
1682 * @sg: list of buffers
1683 * @nents: number of buffers to map (returned from dma_map_sg)
1684 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1685 */
1686void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1687 int nents, enum dma_data_direction dir)
1688{
1689 struct scatterlist *s;
1690 int i;
1691
1692 for_each_sg(sg, s, nents, i)
0fa478df 1693 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
4ce63fcd
MS
1694
1695}
1696
1697/**
1698 * arm_iommu_sync_sg_for_device
1699 * @dev: valid struct device pointer
1700 * @sg: list of buffers
1701 * @nents: number of buffers to map (returned from dma_map_sg)
1702 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1703 */
1704void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1705 int nents, enum dma_data_direction dir)
1706{
1707 struct scatterlist *s;
1708 int i;
1709
1710 for_each_sg(sg, s, nents, i)
0fa478df 1711 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
4ce63fcd
MS
1712}
1713
1714
/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             struct dma_attrs *attrs)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t dma_addr;
        int ret, prot, len = PAGE_ALIGN(size + offset);

        dma_addr = __alloc_iova(mapping, len);
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;

        prot = __dma_direction_to_prot(dir);

        ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
        if (ret < 0)
                goto fail;

        return dma_addr + offset;
fail:
        __free_iova(mapping, dma_addr, len);
        return DMA_ERROR_CODE;
}

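/*
 * Editor's note (worked example, not from the original source): for a
 * buffer that starts 0x300 bytes into its first page and is 0x1a00 bytes
 * long, len = PAGE_ALIGN(0x1a00 + 0x300) = 0x2000 with 4 KiB pages, so two
 * pages of IO virtual address space are reserved and mapped, and the
 * returned handle is the start of that IOVA range plus the 0x300 offset.
 */
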
/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             struct dma_attrs *attrs)
{
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_cpu_to_dev(page, offset, size, dir);

        return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
        int offset = handle & ~PAGE_MASK;
        int len = PAGE_ALIGN(size + offset);

        if (!iova)
                return;

        iommu_unmap(mapping->domain, iova, len);
        __free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
        struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
        int offset = handle & ~PAGE_MASK;
        int len = PAGE_ALIGN(size + offset);

        if (!iova)
                return;

        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_dev_to_cpu(page, offset, size, dir);

        iommu_unmap(mapping->domain, iova, len);
        __free_iova(mapping, iova, len);
}

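/*
 * Editor's usage sketch (not part of the original source): a single-page
 * streaming mapping exercised through the generic DMA API, which reaches
 * arm_iommu_map_page()/arm_iommu_unmap_page() above for a device attached
 * to an IOMMU mapping.  "my_dev" and "my_page" are illustrative names.
 */
#if 0   /* example only */
static int my_page_transfer(struct device *my_dev, struct page *my_page)
{
        dma_addr_t handle;

        handle = dma_map_page(my_dev, my_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(my_dev, handle))
                return -ENOMEM;

        /* ... point the device at "handle" and run the transfer ... */

        dma_unmap_page(my_dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
        return 0;
}
#endif
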
static void arm_iommu_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
        struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
        unsigned int offset = handle & ~PAGE_MASK;

        if (!iova)
                return;

        __dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t iova = handle & PAGE_MASK;
        struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
        unsigned int offset = handle & ~PAGE_MASK;

        if (!iova)
                return;

        __dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops iommu_ops = {
        .alloc = arm_iommu_alloc_attrs,
        .free = arm_iommu_free_attrs,
        .mmap = arm_iommu_mmap_attrs,
        .get_sgtable = arm_iommu_get_sgtable,

        .map_page = arm_iommu_map_page,
        .unmap_page = arm_iommu_unmap_page,
        .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
        .sync_single_for_device = arm_iommu_sync_single_for_device,

        .map_sg = arm_iommu_map_sg,
        .unmap_sg = arm_iommu_unmap_sg,
        .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
        .sync_sg_for_device = arm_iommu_sync_sg_for_device,

        .set_dma_mask = arm_dma_set_mask,
};

struct dma_map_ops iommu_coherent_ops = {
        .alloc = arm_iommu_alloc_attrs,
        .free = arm_iommu_free_attrs,
        .mmap = arm_iommu_mmap_attrs,
        .get_sgtable = arm_iommu_get_sgtable,

        .map_page = arm_coherent_iommu_map_page,
        .unmap_page = arm_coherent_iommu_unmap_page,

        .map_sg = arm_coherent_iommu_map_sg,
        .unmap_sg = arm_coherent_iommu_unmap_sg,

        .set_dma_mask = arm_dma_set_mask,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device() function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
        unsigned int bits = size >> PAGE_SHIFT;
        unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
        struct dma_iommu_mapping *mapping;
        int extensions = 1;
        int err = -ENOMEM;

        /* currently only 32-bit DMA address space is supported */
        if (size > DMA_BIT_MASK(32) + 1)
                return ERR_PTR(-ERANGE);

        if (!bitmap_size)
                return ERR_PTR(-EINVAL);

        if (bitmap_size > PAGE_SIZE) {
                extensions = bitmap_size / PAGE_SIZE;
                bitmap_size = PAGE_SIZE;
        }

        mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
        if (!mapping)
                goto err;

        mapping->bitmap_size = bitmap_size;
        mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
                                   GFP_KERNEL);
        if (!mapping->bitmaps)
                goto err2;

        mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
        if (!mapping->bitmaps[0])
                goto err3;

        mapping->nr_bitmaps = 1;
        mapping->extensions = extensions;
        mapping->base = base;
        mapping->bits = BITS_PER_BYTE * bitmap_size;

        spin_lock_init(&mapping->lock);

        mapping->domain = iommu_domain_alloc(bus);
        if (!mapping->domain)
                goto err4;

        kref_init(&mapping->kref);
        return mapping;
err4:
        kfree(mapping->bitmaps[0]);
err3:
        kfree(mapping->bitmaps);
err2:
        kfree(mapping);
err:
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
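
/*
 * Editor's note (worked example, not from the original source): with 4 KiB
 * pages and 32-bit longs, arm_iommu_create_mapping() for a 256 MiB window
 * computes bits = 65536 and bitmap_size = 8 KiB; since that exceeds
 * PAGE_SIZE, the allocator keeps two page-sized bitmaps (extensions = 2),
 * each covering 128 MiB of IO virtual addresses, and the second one is
 * only allocated on demand by extend_iommu_mapping().
 */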

static void release_iommu_mapping(struct kref *kref)
{
        int i;
        struct dma_iommu_mapping *mapping =
                container_of(kref, struct dma_iommu_mapping, kref);

        iommu_domain_free(mapping->domain);
        for (i = 0; i < mapping->nr_bitmaps; i++)
                kfree(mapping->bitmaps[i]);
        kfree(mapping->bitmaps);
        kfree(mapping);
}

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
        int next_bitmap;

        if (mapping->nr_bitmaps >= mapping->extensions)
                return -EINVAL;

        next_bitmap = mapping->nr_bitmaps;
        mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
                                                GFP_ATOMIC);
        if (!mapping->bitmaps[next_bitmap])
                return -ENOMEM;

        mapping->nr_bitmaps++;

        return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
        if (mapping)
                kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

static int __arm_iommu_attach_device(struct device *dev,
                                     struct dma_iommu_mapping *mapping)
{
        int err;

        err = iommu_attach_device(mapping->domain, dev);
        if (err)
                return err;

        kref_get(&mapping->kref);
        to_dma_iommu_mapping(dev) = mapping;

        pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
        return 0;
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: IO address space mapping structure (returned from
 *      arm_iommu_create_mapping)
 *
 * Attaches the specified IO address space mapping to the provided device.
 * This replaces the DMA operations (dma_map_ops pointer) with the
 * IOMMU-aware version.
 *
 * More than one client may be attached to the same IO address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
                            struct dma_iommu_mapping *mapping)
{
        int err;

        err = __arm_iommu_attach_device(dev, mapping);
        if (err)
                return err;

        set_dma_ops(dev, &iommu_ops);
        return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
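
/*
 * Editor's usage sketch (not part of the original source): a hypothetical
 * driver probe path wiring a device to the IOMMU-aware DMA operations.
 * "my_dev" and the 1 MiB base / SZ_128M size are illustrative assumptions.
 */
#if 0   /* example only */
static int my_driver_enable_iommu(struct device *my_dev)
{
        struct dma_iommu_mapping *mapping;
        int err;

        /* Reserve a 128 MiB IO virtual address window starting at 1 MiB. */
        mapping = arm_iommu_create_mapping(my_dev->bus, 0x100000, SZ_128M);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);

        /* Switch the device over to the iommu_ops defined above. */
        err = arm_iommu_attach_device(my_dev, mapping);
        if (err) {
                arm_iommu_release_mapping(mapping);
                return err;
        }
        return 0;
}
#endif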

static void __arm_iommu_detach_device(struct device *dev)
{
        struct dma_iommu_mapping *mapping;

        mapping = to_dma_iommu_mapping(dev);
        if (!mapping) {
                dev_warn(dev, "Not attached\n");
                return;
        }

        iommu_detach_device(mapping->domain, dev);
        kref_put(&mapping->kref, release_iommu_mapping);
        to_dma_iommu_mapping(dev) = NULL;

        pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}

/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached mapping.
 * This clears the DMA operations (dma_map_ops pointer).
 */
void arm_iommu_detach_device(struct device *dev)
{
        __arm_iommu_detach_device(dev);
        set_dma_ops(dev, NULL);
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
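
/*
 * Editor's usage sketch (not part of the original source): tearing down the
 * attachment created in the probe sketch above.  Note the mapping pointer
 * must be fetched before arm_iommu_detach_device() clears it.
 */
#if 0   /* example only */
static void my_driver_disable_iommu(struct device *my_dev)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(my_dev);

        arm_iommu_detach_device(my_dev);        /* restores NULL dma_map_ops */
        arm_iommu_release_mapping(mapping);     /* drops the final reference */
}
#endif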

static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
        return coherent ? &iommu_coherent_ops : &iommu_ops;
}

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                    struct iommu_ops *iommu)
{
        struct dma_iommu_mapping *mapping;

        if (!iommu)
                return false;

        mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
        if (IS_ERR(mapping)) {
                pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
                                size, dev_name(dev));
                return false;
        }

        if (__arm_iommu_attach_device(dev, mapping)) {
                pr_warn("Failed to attach device %s to IOMMU mapping\n",
                                dev_name(dev));
                arm_iommu_release_mapping(mapping);
                return false;
        }

        return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

        if (!mapping)
                return;

        __arm_iommu_detach_device(dev);
        arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                    struct iommu_ops *iommu)
{
        return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif /* CONFIG_ARM_DMA_USE_IOMMU */

static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
        return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        struct iommu_ops *iommu, bool coherent)
{
        struct dma_map_ops *dma_ops;

        dev->archdata.dma_coherent = coherent;
        if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
                dma_ops = arm_get_iommu_dma_map_ops(coherent);
        else
                dma_ops = arm_get_dma_map_ops(coherent);

        set_dma_ops(dev, dma_ops);
}

void arch_teardown_dma_ops(struct device *dev)
{
        arm_teardown_iommu_dma_ops(dev);
}