/*
 * linux/arch/arm/mm/dma-mapping-nommu.c
 *
 * Based on linux/arch/arm/mm/dma-mapping.c
 *
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 * dma_direct_ops is used if
 *  - MMU/MPU is off
 *  - cpu is v7m w/o cache support
 *  - device is coherent
 * otherwise arm_nommu_dma_ops is used.
 *
 * arm_nommu_dma_ops relies on consistent DMA memory (please refer to
 * [1] for how to declare such memory).
 *
 * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */
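
/*
 * Illustrative sketch (an assumption for illustration, not part of this
 * file): a consistent DMA region is typically declared as a
 * "shared-dma-pool" reserved-memory node in the device tree; the
 * "linux,dma-default" property makes it the global pool that
 * dma_alloc_from_global_coherent() draws from. The address and size
 * below are made up:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_dma: linux,dma@60000000 {
 *			compatible = "shared-dma-pool";
 *			linux,dma-default;
 *			no-map;
 *			reg = <0x60000000 0x100000>;
 *		};
 *	};
 */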

static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	void *ret;

	/*
	 * Try the generic allocator first if the caller indicated that
	 * consistency is not required.
	 */
	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return dma_direct_alloc_pages(dev, size, dma_handle, gfp,
					      attrs);

	ret = dma_alloc_from_global_coherent(size, dma_handle);

	/*
	 * dma_alloc_from_global_coherent() may fail because:
	 *
	 * - no consistent DMA region has been defined, so we can't
	 *   continue.
	 * - there is no space left in the consistent DMA region, so we
	 *   can only fall back to the generic allocator if the caller
	 *   indicated that consistency is not required.
	 */

	WARN_ON_ONCE(ret == NULL);
	return ret;
}
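
/*
 * Hedged usage sketch (illustrative; drivers never call
 * arm_nommu_dma_alloc() directly): allocation goes through the generic
 * DMA API, e.g.
 *
 *	void *buf;
 *	dma_addr_t dma;
 *
 *	buf = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL,
 *			      DMA_ATTR_NON_CONSISTENT);
 *
 * Passing DMA_ATTR_NON_CONSISTENT takes the dma_direct_alloc_pages()
 * fallback above; without it the request must be satisfied from the
 * global coherent pool.
 */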

static void arm_nommu_dma_free(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr,
			       unsigned long attrs)
{
	if (attrs & DMA_ATTR_NON_CONSISTENT) {
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
	} else {
		int ret = dma_release_from_global_coherent(get_order(size),
							   cpu_addr);

		WARN_ON_ONCE(ret == 0);
	}
}

static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	int ret;

	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
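
/*
 * Cache maintenance for streaming DMA: before the device accesses a
 * buffer, clean (or, for DMA_FROM_DEVICE, invalidate) the CPU caches
 * so no dirty line can be written back over data the device provides;
 * after the device is done, invalidate so the CPU observes what the
 * device wrote.
 */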

static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	dmac_map_area(__va(paddr), size, dir);

	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);
		dmac_unmap_area(__va(paddr), size, dir);
	}
}

static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	__dma_page_cpu_to_dev(handle, size, dir);

	return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}

static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
	}

	return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}

static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
					     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
					  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

const struct dma_map_ops arm_nommu_dma_ops = {
	.alloc			= arm_nommu_dma_alloc,
	.free			= arm_nommu_dma_free,
	.mmap			= arm_nommu_dma_mmap,
	.map_page		= arm_nommu_dma_map_page,
	.unmap_page		= arm_nommu_dma_unmap_page,
	.map_sg			= arm_nommu_dma_map_sg,
	.unmap_sg		= arm_nommu_dma_unmap_sg,
	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);
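
/*
 * Hedged usage sketch (illustrative, not part of this file): with these
 * ops installed, streaming DMA in a driver follows the usual pattern,
 * e.g. for a device-to-memory transfer:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 *	... start the transfer and wait for completion ...
 *
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *
 * dma_map_single() lands in arm_nommu_dma_map_page() above, which does
 * the cache maintenance before handing the buffer to the device;
 * dma_unmap_single() invalidates afterwards.
 */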

static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
{
	return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	if (IS_ENABLED(CONFIG_CPU_V7M)) {
		/*
		 * Cache support for v7m is optional, so the device can be
		 * treated as coherent if no cache has been detected. Note
		 * that it is not enough to check whether the MPU is in use,
		 * since in the absence of an MPU the system memory map is
		 * used.
		 */
		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
	} else {
		/*
		 * Assume coherent DMA in case the MMU/MPU has not been
		 * set up.
		 */
		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
	}

	dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);

	set_dma_ops(dev, dma_ops);
}