// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for DMA ops implementations.  These generally rely on the fact that
 * the allocated memory contains normal pages in the direct kernel mapping.
 */
#include <linux/dma-contiguous.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-noncoherent.h>

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	/* The allocation is physically contiguous, so one entry covers it. */
	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
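
/*
 * Usage sketch (hypothetical, not part of this file): a dma_map_ops
 * implementation whose coherent allocations come from the direct kernel
 * mapping can point its .get_sgtable and .mmap hooks straight at these
 * helpers.  "my_dma_ops" is an invented name for illustration:
 *
 *	static const struct dma_map_ops my_dma_ops = {
 *		.get_sgtable	= dma_common_get_sgtable,
 *		.mmap		= dma_common_mmap,
 *		...
 *	};
 */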

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Reject mappings that start or extend beyond the buffer. */
	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* CONFIG_MMU */
}
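
/*
 * Caller-side sketch (hypothetical driver code): dma_common_mmap() is not
 * called directly by drivers; it is reached via dma_mmap_coherent() /
 * dma_mmap_attrs(), which dispatch to ops->mmap.  "mydev_mmap", "mydev",
 * "mydev_buf" and "mydev_dma" are invented names, with the buffer assumed
 * to come from dma_alloc_coherent():
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(mydev, vma, mydev_buf, mydev_dma,
 *					 MYDEV_BUF_SIZE);
 *	}
 */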

struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct page *page;

	/* Try a CMA allocation first, then fall back to the page allocator. */
	page = dma_alloc_contiguous(dev, size, gfp);
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
	if (!page)
		return NULL;

	/*
	 * The buffer is zeroed below, after the mapping is set up, so any
	 * cache maintenance at map time would be wasted work.
	 */
	*dma_handle = ops->map_page(dev, page, 0, size, dir,
			DMA_ATTR_SKIP_CPU_SYNC);
	if (*dma_handle == DMA_MAPPING_ERROR) {
		dma_free_contiguous(dev, page, size);
		return NULL;
	}

	memset(page_address(page), 0, size);
	return page;
}
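
/*
 * Usage sketch (hypothetical, not part of this file): these two helpers are
 * intended to back the .alloc_pages/.free_pages callbacks of a dma_map_ops
 * instance that is happy with pages from the direct mapping, e.g.:
 *
 *	static const struct dma_map_ops my_dma_ops = {
 *		.alloc_pages	= dma_common_alloc_pages,
 *		.free_pages	= dma_common_free_pages,
 *		...
 *	};
 */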

void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->unmap_page)
		ops->unmap_page(dev, dma_handle, size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	/* dma_free_contiguous() also handles pages from alloc_pages_node(). */
	dma_free_contiguous(dev, page, size);
}
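
/*
 * Caller-side sketch (hypothetical driver code): through the dma_map_ops
 * wiring above, these helpers end up serving the dma_alloc_pages() /
 * dma_free_pages() API, which a driver would use roughly like this:
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = dma_alloc_pages(dev, SZ_64K, &dma, DMA_TO_DEVICE, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	... fill page_address(page), sync for the device, start the DMA ...
 *	dma_free_pages(dev, SZ_64K, page, dma, DMA_TO_DEVICE);
 */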