arm64: Implement custom mmap functions for dma mapping
[linux-2.6-block.git] arch/arm64/mm/dma-mapping.c
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

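/*
 * Allocate a buffer for coherent DMA: from CMA when CONFIG_DMA_CMA is
 * enabled, otherwise from swiotlb. The returned pointer is a cacheable
 * linear-map address and *dma_handle holds the matching device address.
 */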
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		struct page *page;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return page_address(page);
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

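/*
 * Release a buffer obtained from __dma_alloc_coherent(), returning it
 * to CMA or to swiotlb depending on how it was allocated.
 */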
static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		phys_addr_t paddr = dma_to_phys(dev, dma_handle);

		dma_release_from_contiguous(dev,
					    phys_to_page(paddr),
					    size >> PAGE_SHIFT);
	} else {
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
	}
}

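/*
 * Non-coherent variant: allocate backing memory as above, clean any
 * dirty cache lines from the kernel linear alias, then create a second,
 * non-cacheable mapping with vmap() for CPU access.
 */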
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t flags,
				     struct dma_attrs *attrs)
{
	struct page *page, **map;
	void *ptr, *coherent_ptr;
	int order, i;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;
	map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
	if (!map)
		goto no_map;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		map[i] = page + i;
	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
			    pgprot_dmacoherent(pgprot_default));
	kfree(map);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = ~0;
	return NULL;
}

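/*
 * Tear down a non-coherent allocation: drop the vmap() alias and free
 * the underlying buffer, whose linear-map address is recovered from the
 * DMA handle.
 */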
static void __dma_free_noncoherent(struct device *dev, size_t size,
				   void *vaddr, dma_addr_t dma_handle,
				   struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	vunmap(vaddr);
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

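/*
 * Streaming DMA wrappers for non-coherent devices: wrap the generic
 * swiotlb operations with the cache maintenance (__dma_map_area /
 * __dma_unmap_area) needed to keep the CPU caches and the device in
 * sync.
 */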
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	for_each_sg(sgl, sg, ret, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);
}

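/*
 * Common mmap helper: check that the vma fits within the DMA allocation
 * and remap the backing pfns into userspace with remap_pfn_range().
 */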
/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

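/*
 * mmap callbacks for the two dma_map_ops variants below: the
 * non-coherent one forces the same non-cacheable attributes used for
 * the kernel alias, the coherent one keeps the caller's vm_page_prot.
 */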
static int __swiotlb_mmap_noncoherent(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

static int __swiotlb_mmap_coherent(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs)
{
	/* Just use whatever page_prot attributes were specified */
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

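/*
 * Two sets of DMA operations: the non-coherent ops use the cache
 * maintaining wrappers above, while the coherent ops call the generic
 * swiotlb routines directly.
 */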
struct dma_map_ops noncoherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_noncoherent,
	.free = __dma_free_noncoherent,
	.mmap = __swiotlb_mmap_noncoherent,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);

struct dma_map_ops coherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_coherent,
	.free = __dma_free_coherent,
	.mmap = __swiotlb_mmap_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);

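/*
 * Default to the coherent ops and size the swiotlb bounce buffer once
 * the buddy allocator is up: 64MB, or the maximum buddy-order
 * allocation if that is smaller.
 */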
extern int swiotlb_late_init_with_default_size(size_t default_size);

static int __init swiotlb_late_init(void)
{
	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);

	dma_ops = &coherent_swiotlb_dma_ops;

	return swiotlb_late_init_with_default_size(swiotlb_size);
}
subsys_initcall(swiotlb_late_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);