/*
 * Based on linux/arch/arm/mm/dma-mapping.c
 *
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 * dma_noop_ops is used if
 *  - MMU/MPU is off
 *  - cpu is v7m w/o cache support
 *  - device is coherent
 * otherwise arm_nommu_dma_ops is used.
 *
 * arm_nommu_dma_ops relies on consistent DMA memory (please refer to
 * [1] on how to declare such memory).
 *
 * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */

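/*
 * A minimal sketch of such a declaration (node names, addresses and
 * sizes below are hypothetical; see [1] above for the authoritative
 * binding): a "shared-dma-pool" region marked "no-map" gives the
 * device a dedicated coherent pool.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@60000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x60000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 *	some_device: device@40000000 {
 *		...
 *		memory-region = <&dma_pool>;
 *	};
 */
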
static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	const struct dma_map_ops *ops = &dma_noop_ops;

	/*
	 * We are here because:
	 * - no consistent DMA region has been defined, so we can't
	 *   continue.
	 * - there is no space left in the consistent DMA region, so we
	 *   can only fall back to the generic allocator if the caller
	 *   advertised that consistency is not required.
	 */

	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return ops->alloc(dev, size, dma_handle, gfp, attrs);

	WARN_ON_ONCE(1);
	return NULL;
}

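/*
 * Mirror of the allocation path above: only buffers handed out by the
 * generic (non-consistent) allocator can be freed here, so anything
 * else indicates a caller bug.
 */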
static void arm_nommu_dma_free(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr,
			       unsigned long attrs)
{
	const struct dma_map_ops *ops = &dma_noop_ops;

	if (attrs & DMA_ATTR_NON_CONSISTENT)
		ops->free(dev, size, cpu_addr, dma_addr, attrs);
	else
		WARN_ON_ONCE(1);
}

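/*
 * Cache maintenance for streaming DMA on a non-coherent CPU:
 *
 * - before the device reads a buffer (DMA_TO_DEVICE), dirty lines are
 *   cleaned (written back) so the device sees up-to-date data;
 * - when the device writes a buffer (DMA_FROM_DEVICE), lines are
 *   invalidated so the CPU does not read stale cached data.
 *
 * Without an MMU, virtual and physical addresses coincide, so __va()
 * is effectively a cast here.
 */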
static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	dmac_map_area(__va(paddr), size, dir);

	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);
		dmac_unmap_area(__va(paddr), size, dir);
	}
}

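/*
 * Without an MMU there is no remapping step: the DMA address is simply
 * the physical address of the page plus the offset. Only the cache
 * maintenance above is needed.
 */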
static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	__dma_page_cpu_to_dev(handle, size, dir);

	return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}

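/*
 * Scatterlists map the same way, entry by entry: each segment's DMA
 * address is its physical address, and no merging of entries is
 * attempted, so the full nents is always returned.
 */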
static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
	}

	return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

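/*
 * The sync_*_for_device/for_cpu hooks let a driver hand a mapped buffer
 * back and forth between CPU and device without unmapping it; they
 * reduce to the same cache maintenance as map/unmap.
 */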
static void arm_nommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}

static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
					     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
					  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

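/*
 * Ops for non-coherent devices: every map/unmap/sync performs explicit
 * cache maintenance. Coherent devices bypass all of this via
 * dma_noop_ops (see arm_nommu_get_dma_map_ops() below).
 */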
const struct dma_map_ops arm_nommu_dma_ops = {
	.alloc			= arm_nommu_dma_alloc,
	.free			= arm_nommu_dma_free,
	.map_page		= arm_nommu_dma_map_page,
	.unmap_page		= arm_nommu_dma_unmap_page,
	.map_sg			= arm_nommu_dma_map_sg,
	.unmap_sg		= arm_nommu_dma_unmap_sg,
	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

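/*
 * Sketch of the call flow a (hypothetical) driver would trigger; the
 * buffer size and variable names are illustrative only. The generic
 * dma_alloc_attrs()/dma_sync_single_*() helpers dispatch into the ops
 * above through the device's dma_map_ops:
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL,
 *			      DMA_ATTR_NON_CONSISTENT);
 *	dma_sync_single_for_device(dev, dma, SZ_4K, DMA_TO_DEVICE);
 *	... device reads/writes the buffer ...
 *	dma_sync_single_for_cpu(dev, dma, SZ_4K, DMA_FROM_DEVICE);
 *	dma_free_attrs(dev, SZ_4K, buf, dma, DMA_ATTR_NON_CONSISTENT);
 */
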
static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
{
	return coherent ? &dma_noop_ops : &arm_nommu_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	if (IS_ENABLED(CONFIG_CPU_V7M)) {
		/*
		 * Cache support for v7m is optional, so it can be treated
		 * as coherent if no cache has been detected. Note that it
		 * is not enough to check whether the MPU is in use, since
		 * in the absence of an MPU the system memory map is used.
		 */
		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
	} else {
		/*
		 * Assume coherent DMA in case MMU/MPU has not been set up.
		 */
		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
	}

	dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);

	set_dma_ops(dev, dma_ops);
}

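/* Nothing to undo: the ops are static and need no per-device teardown. */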
void arch_teardown_dma_ops(struct device *dev)
{
}

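/*
 * Preallocate entries for the dma-debug facility (CONFIG_DMA_API_DEBUG)
 * so DMA mappings can be tracked from early boot; with the option
 * disabled, dma_debug_init() is a no-op stub.
 */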
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
core_initcall(dma_debug_do_init);