arch/arm64/mm/dma-mapping.c
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

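/*
 * Non-coherent devices (and explicit DMA_ATTR_WRITE_COMBINE requests) need
 * a Normal non-cacheable mapping; fully coherent devices keep @prot as-is.
 */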
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
                                 bool coherent)
{
        if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
                return pgprot_writecombine(prot);
        return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

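/* "coherent_pool=<size>" on the command line overrides the default above. */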
static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);

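/*
 * Allocate from the pre-mapped, non-cacheable atomic pool.  This path is
 * used when the caller cannot sleep; *ret_page is set to the backing page.
 */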
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
                if (flags & __GFP_ZERO)
                        memset(ptr, 0, size);
        }

        return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

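/* Free @start back to the atomic pool; returns 0 if it is not pool memory. */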
static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}

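/*
 * Backend allocator for coherent memory: use CMA when the caller is allowed
 * to sleep, otherwise fall back to swiotlb.  Returns a cacheable kernel
 * address; any required non-cacheable remapping is done by the caller.
 */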
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  struct dma_attrs *attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return NULL;
        }

        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
                struct page *page;
                void *addr;

                size = PAGE_ALIGN(size);
                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                        get_order(size));
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                addr = page_address(page);
                if (flags & __GFP_ZERO)
                        memset(addr, 0, size);
                return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}

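/* Counterpart of __dma_alloc_coherent(): try CMA first, then swiotlb. */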
static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                struct dma_attrs *attrs)
{
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);

        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }

        freed = dma_release_from_contiguous(dev,
                                        phys_to_page(paddr),
                                        size >> PAGE_SHIFT);
        if (!freed)
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

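/*
 * dma_map_ops.alloc: atomic (non-blocking) requests for non-coherent
 * devices are served from the pre-mapped pool; otherwise allocate through
 * the backend, flush the cacheable kernel alias and, for non-coherent
 * devices, hand back a non-cacheable remapping of the buffer.
 */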
static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
                         struct dma_attrs *attrs)
{
        struct page *page;
        void *ptr, *coherent_ptr;
        bool coherent = is_device_dma_coherent(dev);

        size = PAGE_ALIGN(size);

        if (!coherent && !(flags & __GFP_WAIT)) {
                struct page *page = NULL;
                void *addr = __alloc_from_pool(size, &page, flags);

                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));

                return addr;
        }

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;

        /* no need for non-cacheable mapping if coherent */
        if (coherent)
                return ptr;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_range(ptr, ptr + size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                __get_dma_pgprot(attrs,
                                        __pgprot(PROT_NORMAL_NC), false),
                                        NULL);
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        *dma_handle = DMA_ERROR_CODE;
        return NULL;
}

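/*
 * dma_map_ops.free: release a pool allocation, or tear down the
 * non-cacheable remapping before freeing through the backend.
 */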
static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
                       struct dma_attrs *attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        if (!is_device_dma_coherent(dev)) {
                if (__free_from_pool(vaddr, size))
                        return;
                vunmap(vaddr);
        }
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

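/*
 * The streaming map/unmap and sync wrappers below add the cache
 * maintenance that non-coherent devices need around the plain swiotlb
 * calls: clean the CPU caches before the device reads the buffer,
 * invalidate them after the device has written to it
 * (__dma_map_area/__dma_unmap_area pick the operation from @dir).
 */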
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, ret, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);

        return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

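/*
 * The sync_* hooks perform the same maintenance when buffer ownership
 * bounces between CPU and device without unmapping.
 */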
static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);
}

/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
                                        PAGE_SHIFT;
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}

static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          struct dma_attrs *attrs)
{
        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));
        return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

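/* Default DMA operations; installed globally by arm64_dma_init(). */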
static struct dma_map_ops swiotlb_dma_ops = {
        .alloc = __dma_alloc,
        .free = __dma_free,
        .mmap = __swiotlb_mmap,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};

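/*
 * Set up the atomic pool at boot: allocate from CMA (or plain GFP_DMA
 * pages), zero and flush it, remap it non-cacheable and hand the region
 * to a genpool so that __alloc_from_pool() can carve it up later.
 */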
static int __init atomic_pool_init(void)
{
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
        struct page *page;
        void *addr;
        unsigned int pool_size_order = get_order(atomic_pool_size);

        if (dev_get_cma_area(NULL))
                page = dma_alloc_from_contiguous(NULL, nr_pages,
                                                        pool_size_order);
        else
                page = alloc_pages(GFP_DMA, pool_size_order);

        if (page) {
                int ret;
                void *page_addr = page_address(page);

                memset(page_addr, 0, atomic_pool_size);
                __dma_flush_range(page_addr, page_addr + atomic_pool_size);

                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
                        goto free_page;

                addr = dma_common_contiguous_remap(page, atomic_pool_size,
                                        VM_USERMAP, prot, atomic_pool_init);

                if (!addr)
                        goto destroy_genpool;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto remove_mapping;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  (void *)PAGE_SHIFT);

                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }
        goto out;

remove_mapping:
        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
free_page:
        if (!dma_release_from_contiguous(NULL, page, nr_pages))
                __free_pages(page, pool_size_order);
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
                atomic_pool_size / 1024);
        return -ENOMEM;
}

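/* Install the swiotlb-backed ops as the global dma_ops and create the pool. */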
static int __init arm64_dma_init(void)
{
        int ret;

        dma_ops = &swiotlb_dma_ops;

        ret = atomic_pool_init();

        return ret;
}
arch_initcall(arm64_dma_init);

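/* Pre-allocate tracking entries for the DMA debugging API. */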
#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);