// SPDX-License-Identifier: GPL-2.0-only
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

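/*
 * User-space mappings of a DMA buffer must not be cacheable when the
 * device is non-coherent (or when the caller explicitly asks for
 * write-combine), so that CPU and device agree on the memory attributes:
 * on arm64, pgprot_writecombine() yields Normal non-cacheable.
 */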
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
                unsigned long attrs)
{
        if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
                return pgprot_writecombine(prot);
        return prot;
}

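/*
 * Streaming DMA cache maintenance, by kernel VA: before a transfer to the
 * device the relevant lines must be cleaned to the point of coherency,
 * and after a transfer from the device any stale lines must be
 * invalidated. __dma_map_area()/__dma_unmap_area() (in cache.S) pick the
 * right operation from the transfer direction.
 */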
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        __dma_map_area(phys_to_virt(paddr), size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        __dma_unmap_area(phys_to_virt(paddr), size, dir);
}

void arch_dma_prep_coherent(struct page *page, size_t size)
{
        __dma_flush_area(page_address(page), size);
}

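/*
 * Helpers shared by the IOMMU mmap/get_sgtable paths below, for buffers
 * that are physically contiguous (lowmem pages or CMA) rather than a
 * vmalloc'ed page array.
 */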
#ifdef CONFIG_IOMMU_DMA
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
                                      struct page *page, size_t size)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

        return ret;
}

static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
                              unsigned long pfn, size_t size)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = vma_pages(vma);
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}
#endif /* CONFIG_IOMMU_DMA */

static int __init arm64_dma_init(void)
{
        return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
}
arch_initcall(arm64_dma_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
        __dma_flush_area(virt, PAGE_SIZE);
}

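/*
 * Coherent allocation behind an IOMMU. Three paths, chosen below: atomic
 * callers get a physically contiguous buffer (lowmem pages or the
 * non-cacheable atomic pool), DMA_ATTR_FORCE_CONTIGUOUS callers get CMA
 * memory remapped into vmalloc space, and everyone else gets a page array
 * from iommu_dma_alloc() remapped into vmalloc space.
 *
 * Callers never see any of this directly; a typical driver goes through
 * the DMA API, e.g. (sketch):
 *
 *	dma_addr_t iova;
 *	void *cpu = dma_alloc_coherent(dev, SZ_64K, &iova, GFP_KERNEL);
 *
 *	if (cpu)
 *		dma_free_coherent(dev, SZ_64K, cpu, iova);
 *
 * GFP_KERNEL allows blocking, so that example takes one of the two
 * remapping paths rather than the atomic pool.
 */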
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 dma_addr_t *handle, gfp_t gfp,
                                 unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        size_t iosize = size;
        void *addr;

        if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
                return NULL;

        size = PAGE_ALIGN(size);

        /*
         * Some drivers rely on this, and we probably don't want the
         * possibility of stale kernel data being read by devices anyway.
         */
        gfp |= __GFP_ZERO;

        if (!gfpflags_allow_blocking(gfp)) {
                struct page *page;
                /*
                 * In atomic context we can't remap anything, so we'll only
                 * get the virtually contiguous buffer we need by way of a
                 * physically contiguous allocation.
                 */
                if (coherent) {
                        page = alloc_pages(gfp, get_order(size));
                        addr = page ? page_address(page) : NULL;
                } else {
                        addr = dma_alloc_from_pool(size, &page, gfp);
                }
                if (!addr)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (*handle == DMA_MAPPING_ERROR) {
                        if (coherent)
                                __free_pages(page, get_order(size));
                        else
                                dma_free_from_pool(addr, size);
                        addr = NULL;
                }
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
                struct page *page;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                        get_order(size), gfp & __GFP_NOWARN);
                if (!page)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (*handle == DMA_MAPPING_ERROR) {
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                        return NULL;
                }
                addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot,
                                                   __builtin_return_address(0));
                if (addr) {
                        if (!coherent)
                                __dma_flush_area(page_to_virt(page), iosize);
                        memset(addr, 0, size);
                } else {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                }
        } else {
                pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
                struct page **pages;

                pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
                                        handle, flush_page);
                if (!pages)
                        return NULL;

                addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                              __builtin_return_address(0));
                if (!addr)
                        iommu_dma_free(dev, pages, iosize, handle);
        }
        return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                               dma_addr_t handle, unsigned long attrs)
{
        size_t iosize = size;

        size = PAGE_ALIGN(size);
        /*
         * @cpu_addr will be one of 4 things depending on how it was allocated:
         * - A remapped array of pages for contiguous allocations.
         * - A remapped array of pages from iommu_dma_alloc(), for all
         *   non-atomic allocations.
         * - A non-cacheable alias from the atomic pool, for atomic
         *   allocations by non-coherent devices.
         * - A normal lowmem address, for atomic allocations by
         *   coherent devices.
         * Hence how dodgy the below logic looks...
         */
        if (dma_in_atomic_pool(cpu_addr, size)) {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                dma_free_from_pool(cpu_addr, size);
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                struct page *page = vmalloc_to_page(cpu_addr);

                iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)) {
                struct vm_struct *area = find_vm_area(cpu_addr);

                if (WARN_ON(!area || !area->pages))
                        return;
                iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
}

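/*
 * mmap() of a coherent buffer must dispatch on the same cases the
 * allocator above can produce: a lowmem address (atomic), a contiguous
 * CMA region remapped into vmalloc space, or a vmalloc'ed page array.
 * Drivers reach this through the DMA API, e.g. (sketch, from a driver's
 * own mmap handler):
 *
 *	return dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);
 */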
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
                              unsigned long attrs)
{
        struct vm_struct *area;
        int ret;

        vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (!is_vmalloc_addr(cpu_addr)) {
                unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
                return __swiotlb_mmap_pfn(vma, pfn, size);
        }

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                unsigned long pfn = vmalloc_to_pfn(cpu_addr);
                return __swiotlb_mmap_pfn(vma, pfn, size);
        }

        area = find_vm_area(cpu_addr);
        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return iommu_dma_mmap(area->pages, size, vma);
}

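/* Same three-way dispatch as __iommu_mmap_attrs(), building an sg_table. */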
static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t dma_addr,
                               size_t size, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (!is_vmalloc_addr(cpu_addr)) {
                struct page *page = virt_to_page(cpu_addr);
                return __swiotlb_get_sgtable_page(sgt, page, size);
        }

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                struct page *page = vmalloc_to_page(cpu_addr);
                return __swiotlb_get_sgtable_page(sgt, page, size);
        }

        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
                                         GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dev_addr, size_t size,
                                        enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
        arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
                                           dma_addr_t dev_addr, size_t size,
                                           enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
        arch_sync_dma_for_device(dev, phys, size, dir);
}

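/*
 * Streaming mapping of a single page: translate through the IOMMU, then
 * do the CPU-side cache maintenance unless the device is coherent or the
 * caller opted out with DMA_ATTR_SKIP_CPU_SYNC. The usual entry point is
 * dma_map_page()/dma_unmap_page(), e.g. (sketch):
 *
 *	dma_addr_t iova = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				       DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, iova))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(dev, iova, PAGE_SIZE, DMA_TO_DEVICE);
 */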
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

        if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            dev_addr != DMA_MAPPING_ERROR)
                __dma_map_area(page_address(page) + offset, size, dir);

        return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

        iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
                                    struct scatterlist *sgl, int nelems,
                                    enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
                                       struct scatterlist *sgl, int nelems,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

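/*
 * Scatterlist variants: cache maintenance walks the original entries
 * before iommu_dma_map_sg() may concatenate them into fewer IOVA
 * segments, so every CPU-visible byte is covered regardless of how the
 * list ends up being merged.
 */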
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir,
                                unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);

        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);

        return iommu_dma_map_sg(dev, sgl, nelems,
                                dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
                                   struct scatterlist *sgl, int nelems,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

        iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

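/*
 * The per-device dma_map_ops installed by __iommu_setup_dma_ops() below
 * when the device sits in a DMA-API-managed IOMMU domain; devices without
 * such a domain keep dev->dma_ops as NULL and fall back to the default
 * direct/swiotlb implementation.
 */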
static const struct dma_map_ops iommu_dma_ops = {
        .alloc = __iommu_alloc_attrs,
        .free = __iommu_free_attrs,
        .mmap = __iommu_mmap_attrs,
        .get_sgtable = __iommu_get_sgtable,
        .map_page = __iommu_map_page,
        .unmap_page = __iommu_unmap_page,
        .map_sg = __iommu_map_sg_attrs,
        .unmap_sg = __iommu_unmap_sg_attrs,
        .sync_single_for_cpu = __iommu_sync_single_for_cpu,
        .sync_single_for_device = __iommu_sync_single_for_device,
        .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
        .sync_sg_for_device = __iommu_sync_sg_for_device,
        .map_resource = iommu_dma_map_resource,
        .unmap_resource = iommu_dma_unmap_resource,
};

static int __init __iommu_dma_init(void)
{
        return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

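/*
 * Called via arch_setup_dma_ops() once the bus code (OF or ACPI/IORT) has
 * worked out which IOMMU, if any, translates for this device.
 */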
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *ops)
{
        struct iommu_domain *domain;

        if (!ops)
                return;

        /*
         * The IOMMU core code allocates the default DMA domain, which the
         * underlying IOMMU driver needs to support via the dma-iommu layer.
         */
        domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                goto out_err;

        if (domain->type == IOMMU_DOMAIN_DMA) {
                if (iommu_dma_init_domain(domain, dma_base, size, dev))
                        goto out_err;

                dev->dma_ops = &iommu_dma_ops;
        }

        return;

out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
        dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

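/*
 * Entry point from the bus DMA configuration code. The WARN below guards
 * against hardware whose cache writeback granule (CTR_EL0.CWG) exceeds
 * ARCH_DMA_MINALIGN: a non-coherent device could then share a cache line
 * with unrelated kmalloc data and corrupt it during streaming DMA.
 */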
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        int cls = cache_line_size_of_cpu();

        WARN_TAINT(!coherent && cls > ARCH_DMA_MINALIGN,
                   TAINT_CPU_OUT_OF_SPEC,
                   "%s %s: ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
                   dev_driver_string(dev), dev_name(dev),
                   ARCH_DMA_MINALIGN, cls);

        dev->dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
        if (xen_initial_domain())
                dev->dma_ops = xen_dma_ops;
#endif
}