dma-direct: add support for allocation from ZONE_DMA and ZONE_DMA32
lib/dma-direct.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>

#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

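/*
 * Check that a mapping of @size bytes at @dma_addr is reachable through the
 * device's DMA mask.  The error is only logged for masks of at least 32
 * bits, so devices with small ISA-style masks that are expected to fail do
 * not flood the log.
 */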
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}

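/*
 * Allocate zeroed, physically contiguous pages for a coherent mapping.  The
 * GFP zone modifiers are derived from the coherent DMA mask so that the
 * pages are addressable by the device, and CMA is tried first when the
 * caller is allowed to sleep.
 */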
static void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;

	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
	if (!page)
		return NULL;

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	memset(page_address(page), 0, size);
	return page_address(page);
}

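/*
 * Free a coherent allocation: give CMA pages back to the contiguous
 * allocator and everything else back to the page allocator.
 */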
static void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, get_order(size));
}

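/*
 * Map a single page.  With a direct mapping the DMA address is just the
 * physical address of the page plus the offset, translated by phys_to_dma().
 */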
static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}

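/*
 * Map a scatterlist entry by entry.  Returns 0 (i.e. failure) as soon as one
 * entry does not fit the device's DMA mask.
 */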
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

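/*
 * DMA address 0 (DIRECT_MAPPING_ERROR) doubles as the error cookie, so a
 * failed mapping is detected by a simple comparison.
 */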
static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

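/*
 * The exported ops table.  No unmap or sync callbacks are provided because
 * the direct mapping requires no teardown and no cache maintenance.
 */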
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.mapping_error		= dma_direct_mapping_error,
};
EXPORT_SYMBOL(dma_direct_ops);
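/*
 * Usage sketch (not part of this file, and the exact wiring varies by
 * architecture): an architecture that wants these ops as its default
 * typically returns &dma_direct_ops from its get_arch_dma_ops() hook,
 * roughly:
 *
 *	static inline const struct dma_map_ops *
 *	get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &dma_direct_ops;
 *	}
 */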