kernel/dma/direct.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;
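
/*
 * Worked example (annotation, not from the original source): with the
 * default zone_dma_bits = 24, DMA_BIT_MASK(24) == 0x00ffffff, so ZONE_DMA
 * is assumed to cover the first 16 MiB of physical memory.  An
 * architecture whose DMA zone instead spans, say, 1 GiB would set
 * zone_dma_bits = 30 from its early init code, before any dma-direct
 * allocation happens.
 */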

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                phys_addr_t phys)
{
        if (force_dma_unencrypted(dev))
                return phys_to_dma_unencrypted(dev, phys);
        return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
                dma_addr_t dma_addr)
{
        return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
        phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
        u64 max_dma = phys_to_dma_direct(dev, phys);

        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
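
/*
 * Worked example (annotation, assuming an identity phys<->dma mapping):
 * on a machine with 8 GiB of RAM the highest DMA address is just below
 * 1ULL << 33, so fls64(max_dma) == 33 and the function returns
 * (1ULL << 32) * 2 - 1 == DMA_BIT_MASK(33): the smallest power-of-two
 * style mask that still covers every RAM address.
 */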

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
        u64 dma_limit = min_not_zero(dev->coherent_dma_mask,
                                     dev->bus_dma_limit);

        /*
         * Optimistically try the zone that the physical address mask falls
         * into first.  If that returns memory that isn't actually addressable
         * we will fall back to the next lower zone and try again.
         *
         * Note that GFP_DMA32 and GFP_DMA are no-ops without the
         * corresponding zones.
         */
        *phys_limit = dma_to_phys(dev, dma_limit);
        if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
                return GFP_DMA;
        if (*phys_limit <= DMA_BIT_MASK(32))
                return GFP_DMA32;
        return 0;
}
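
/*
 * Example (annotation, assuming an identity dma mapping and no
 * bus_dma_limit): a device with coherent_dma_mask = DMA_BIT_MASK(32)
 * gets *phys_limit = 0xffffffff, which selects GFP_DMA32; an ISA-style
 * 24-bit mask would select GFP_DMA; a full 64-bit mask returns 0, so
 * any zone may be used.
 */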

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

        if (dma_addr == DMA_MAPPING_ERROR)
                return false;
        return dma_addr + size - 1 <=
                min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
        if (!force_dma_unencrypted(dev))
                return 0;
        return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
        int ret;

        if (!force_dma_unencrypted(dev))
                return 0;
        ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
        if (ret)
                pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
        return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
                                    size_t size)
{
        if (swiotlb_free(dev, page, size))
                return;
        dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
        struct page *page = swiotlb_alloc(dev, size);

        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                swiotlb_free(dev, page, size);
                return NULL;
        }

        return page;
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp, bool allow_highmem)
{
        int node = dev_to_node(dev);
        struct page *page = NULL;
        u64 phys_limit;

        WARN_ON_ONCE(!PAGE_ALIGNED(size));

        if (is_swiotlb_for_alloc(dev))
                return dma_direct_alloc_swiotlb(dev, size);

        gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
        page = dma_alloc_contiguous(dev, size, gfp);
        if (page) {
                if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
                    (!allow_highmem && PageHighMem(page))) {
                        dma_free_contiguous(dev, page, size);
                        page = NULL;
                }
        }
again:
        if (!page)
                page = alloc_pages_node(node, gfp, get_order(size));
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;

                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_limit < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }

                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }

        return page;
}
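
/*
 * Allocation ladder (summary annotation, not from the original source):
 * the swiotlb pool if the device must bounce, then CMA/contiguous memory
 * via dma_alloc_contiguous(), then the page allocator in the optimistic
 * zone; if dma_coherent_ok() still fails, retry the page allocator with
 * GFP_DMA32 and finally GFP_DMA before giving up.
 */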

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
        return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}
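
/*
 * Example (annotation): dma_alloc_coherent(dev, size, &handle, GFP_ATOMIC)
 * from a context that cannot sleep ends up here with a non-blocking gfp,
 * so when the buffer would need remapping or decryption it is carved out
 * of the pre-populated atomic pools rather than the page allocator.
 */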

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        struct page *page;
        u64 phys_limit;
        void *ret;

        if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
                return NULL;

        gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
        page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
        if (!page)
                return NULL;
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        struct page *page;

        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
        if (!page)
                return NULL;

        /* remove any dirty cache lines on the kernel alias */
        if (!PageHighMem(page))
                arch_dma_prep_coherent(page, size);

        /* return the page pointer as the opaque cookie */
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return page;
}

void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        bool remap = false, set_uncached = false;
        struct page *page;
        void *ret;

        size = PAGE_ALIGN(size);
        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
                return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

        if (!dev_is_dma_coherent(dev)) {
                if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
                    !is_swiotlb_for_alloc(dev))
                        return arch_dma_alloc(dev, size, dma_handle, gfp,
                                              attrs);

                /*
                 * If there is a global pool, always allocate from it for
                 * non-coherent devices.
                 */
                if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
                        return dma_alloc_from_global_coherent(dev, size,
                                        dma_handle);

                /*
                 * Otherwise we require the architecture to either be able to
                 * mark arbitrary parts of the kernel direct mapping uncached,
                 * or to remap it uncached.
                 */
                set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
                remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
                if (!set_uncached && !remap) {
                        pr_warn_once("coherent DMA allocations not supported on this platform.\n");
                        return NULL;
                }
        }

        /*
         * Remapping or decrypting memory may block, so allocate the memory
         * from the atomic pools instead if we aren't allowed to block.
         */
        if ((remap || force_dma_unencrypted(dev)) &&
            dma_direct_use_pool(dev, gfp))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

        /* we always manually zero the memory once we are done */
        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
        if (!page)
                return NULL;

        /*
         * dma_alloc_contiguous can return highmem pages depending on a
         * combination of the cma= arguments and per-arch setup.  These need
         * to be remapped to return a kernel virtual address.
         */
        if (PageHighMem(page)) {
                remap = true;
                set_uncached = false;
        }

        if (remap) {
                pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

                if (force_dma_unencrypted(dev))
                        prot = pgprot_decrypted(prot);

                /* remove any dirty cache lines on the kernel alias */
                arch_dma_prep_coherent(page, size);

                /* create a coherent mapping */
                ret = dma_common_contiguous_remap(page, size, prot,
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
        } else {
                ret = page_address(page);
                if (dma_set_decrypted(dev, ret, size))
                        goto out_leak_pages;
        }

        memset(ret, 0, size);

        if (set_uncached) {
                arch_dma_prep_coherent(page, size);
                ret = arch_dma_set_uncached(ret, size);
                if (IS_ERR(ret))
                        goto out_encrypt_pages;
        }

        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;

out_encrypt_pages:
        if (dma_set_encrypted(dev, page_address(page), size))
                return NULL;
out_free_pages:
        __dma_direct_free_pages(dev, page, size);
        return NULL;
out_leak_pages:
        return NULL;
}
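
/*
 * Typical caller view (sketch annotation, not part of this file): drivers
 * reach dma_direct_alloc() through the dma_alloc_coherent() wrapper:
 *
 *      void *buf;
 *      dma_addr_t handle;
 *
 *      buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ... program "handle" into the device ...
 *      dma_free_coherent(dev, SZ_4K, buf, handle);
 */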

void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int page_order = get_order(size);

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
                /* cpu_addr is a struct page cookie, not a kernel address */
                dma_free_contiguous(dev, cpu_addr, size);
                return;
        }

        if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
            !dev_is_dma_coherent(dev) &&
            !is_swiotlb_for_alloc(dev)) {
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
                return;
        }

        if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
            !dev_is_dma_coherent(dev)) {
                if (!dma_release_from_global_coherent(page_order, cpu_addr))
                        WARN_ON_ONCE(1);
                return;
        }

        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
                return;

        if (is_vmalloc_addr(cpu_addr)) {
                vunmap(cpu_addr);
        } else {
                if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
                        arch_dma_clear_uncached(cpu_addr, size);
                if (dma_set_encrypted(dev, cpu_addr, size))
                        return;
        }

        __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page;
        void *ret;

        if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

        page = __dma_direct_alloc_pages(dev, size, gfp, false);
        if (!page)
                return NULL;

        ret = page_address(page);
        if (dma_set_decrypted(dev, ret, size))
                goto out_leak_pages;
        memset(ret, 0, size);
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return page;
out_leak_pages:
        return NULL;
}
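
/*
 * Caller view (sketch annotation): this is the dma-direct backend for the
 * dma_alloc_pages() API, which hands back a struct page for drivers that
 * want to manage their own mappings, e.g.:
 *
 *      struct page *p;
 *      dma_addr_t handle;
 *
 *      p = dma_alloc_pages(dev, SZ_64K, &handle, DMA_BIDIRECTIONAL,
 *                          GFP_KERNEL);
 *      if (p)
 *              dma_free_pages(dev, SZ_64K, p, handle, DMA_BIDIRECTIONAL);
 */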

void dma_direct_free_pages(struct device *dev, size_t size,
                struct page *page, dma_addr_t dma_addr,
                enum dma_data_direction dir)
{
        void *vaddr = page_address(page);

        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(dev, vaddr, size))
                return;

        if (dma_set_encrypted(dev, vaddr, size))
                return;
        __dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                if (unlikely(is_swiotlb_buffer(dev, paddr)))
                        swiotlb_sync_single_for_device(dev, paddr, sg->length,
                                                       dir);

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_device(paddr, sg->length,
                                        dir);
        }
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_cpu(paddr, sg->length, dir);

                if (unlikely(is_swiotlb_buffer(dev, paddr)))
                        swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
                                                    dir);

                if (dir == DMA_FROM_DEVICE)
                        arch_dma_mark_clean(paddr, sg->length);
        }

        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                if (sg_dma_is_bus_address(sg))
                        sg_dma_unmark_bus_address(sg);
                else
                        dma_direct_unmap_page(dev, sg->dma_address,
                                              sg_dma_len(sg), dir, attrs);
        }
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct pci_p2pdma_map_state p2pdma_state = {};
        enum pci_p2pdma_map_type map;
        struct scatterlist *sg;
        int i, ret;

        for_each_sg(sgl, sg, nents, i) {
                if (is_pci_p2pdma_page(sg_page(sg))) {
                        map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
                        switch (map) {
                        case PCI_P2PDMA_MAP_BUS_ADDR:
                                continue;
                        case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
                                /*
                                 * Any P2P mapping that traverses the PCI
                                 * host bridge must be mapped with CPU physical
                                 * address and not PCI bus addresses. This is
                                 * done with dma_direct_map_page() below.
                                 */
                                break;
                        default:
                                ret = -EREMOTEIO;
                                goto out_unmap;
                        }
                }

                sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
                                sg->offset, sg->length, dir, attrs);
                if (sg->dma_address == DMA_MAPPING_ERROR) {
                        ret = -EIO;
                        goto out_unmap;
                }
                sg_dma_len(sg) = sg->length;
        }

        return nents;

out_unmap:
        dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        return ret;
}
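
/*
 * Caller view (sketch annotation): wrappers such as dma_map_sgtable()
 * propagate the negative errno from this function (-EREMOTEIO for
 * unsupported P2PDMA segments, -EIO for plain mapping failures):
 *
 *      if (dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0))
 *              goto err;
 *      ...
 *      dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 */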

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t dma_addr = paddr;

        if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
                dev_err_once(dev,
                             "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                             &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                WARN_ON_ONCE(1);
                return DMA_MAPPING_ERROR;
        }

        return dma_addr;
}
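
/*
 * Note (annotation): this backs dma_map_resource(), which maps MMIO
 * ranges (e.g. another device's BAR) rather than RAM.  In dma-direct the
 * bus address is simply the physical address, so the only work left is
 * the dma_capable() range check above.
 */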

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page = dma_direct_to_page(dev, dma_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
        return dev_is_dma_coherent(dev) ||
                IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
        int ret = -ENXIO;

        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
        if (force_dma_unencrypted(dev))
                vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
        if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
                return ret;

        if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
                return -ENXIO;
        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
}
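
/*
 * Caller view (sketch annotation): a driver exposes a coherent buffer to
 * userspace from its ->mmap file operation via the dma_mmap_coherent()
 * wrapper, which ends up here for dma-direct devices.  "foo" and its
 * fields below are placeholders for a driver's private state:
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *                                       foo->dma_handle, foo->size);
 *      }
 */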

int dma_direct_supported(struct device *dev, u64 mask)
{
        u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

        /*
         * Because 32-bit DMA masks are so common we expect every architecture
         * to be able to satisfy them - either by not supporting more physical
         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
        if (mask >= DMA_BIT_MASK(32))
                return 1;

        /*
         * This check needs to be against the actual bit mask value, so use
         * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
         * part of the check.
         */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
        return mask >= phys_to_dma_unencrypted(dev, min_mask);
}
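
/*
 * Worked example (annotation, assuming an identity dma mapping): a device
 * with a 30-bit mask on an 8 GiB system is supported when CONFIG_ZONE_DMA
 * is enabled, because min_mask shrinks to DMA_BIT_MASK(24) (16 MiB) and
 * allocations can fall back to ZONE_DMA; without ZONE_DMA, min_mask stays
 * at the top of RAM and the 30-bit mask is rejected.
 */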

/*
 * Check whether all RAM resource ranges are covered by the dma_range_map.
 * Returns 0 when the check needs to continue with the next range,
 * returns 1 if some RAM in this range can't be covered by the dma_range_map.
 */
static int check_ram_in_range_map(unsigned long start_pfn,
                                  unsigned long nr_pages, void *data)
{
        unsigned long end_pfn = start_pfn + nr_pages;
        const struct bus_dma_region *bdr = NULL;
        const struct bus_dma_region *m;
        struct device *dev = data;

        while (start_pfn < end_pfn) {
                for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
                        unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

                        if (start_pfn >= cpu_start_pfn &&
                            start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
                                bdr = m;
                                break;
                        }
                }
                if (!bdr)
                        return 1;

                start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
        }

        return 0;
}

bool dma_direct_all_ram_mapped(struct device *dev)
{
        if (!dev->dma_range_map)
                return true;
        return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
                                      check_ram_in_range_map);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
        /* If SWIOTLB is active, use its maximum mapping size */
        if (is_swiotlb_active(dev) &&
            (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
                return swiotlb_max_mapping_size(dev);
        return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        return !dev_is_dma_coherent(dev) ||
               is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:        device pointer; needed to "own" the allocated memory.
 * @cpu_start:  beginning of memory region covered by this offset.
 * @dma_start:  beginning of DMA/PCI region covered by this offset.
 * @size:       size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug.  The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
                         dma_addr_t dma_start, u64 size)
{
        struct bus_dma_region *map;
        u64 offset = (u64)cpu_start - (u64)dma_start;

        if (dev->dma_range_map) {
                dev_err(dev, "attempt to add DMA range to existing map\n");
                return -EINVAL;
        }

        if (!offset)
                return 0;

        map = kcalloc(2, sizeof(*map), GFP_KERNEL);
        if (!map)
                return -ENOMEM;
        map[0].cpu_start = cpu_start;
        map[0].dma_start = dma_start;
        map[0].size = size;
        dev->dma_range_map = map;
        return 0;
}
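
/*
 * Worked example (annotation): on a SoC whose devices see RAM starting at
 * bus address 0 while the CPU sees it at 0x80000000, platform code would
 * call
 *
 *      dma_direct_set_offset(dev, 0x80000000, 0x00000000, SZ_1G);
 *
 * so that phys_to_dma()/dma_to_phys() translate by the 0x80000000 offset.
 * kcalloc(2, ...) leaves a zero-sized terminator entry behind map[0],
 * which is what the PFN_DOWN(m->size) loop condition above relies on.
 */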