/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

7 | ||
8 | #include <linux/device.h> | |
9 | #include <linux/dma-mapping.h> | |
80d3e8ab | 10 | #include <linux/dma-debug.h> |
5a0e3ad6 | 11 | #include <linux/gfp.h> |
95f72d1e | 12 | #include <linux/memblock.h> |
66b15db6 | 13 | #include <linux/export.h> |
a9803497 AB |
14 | #include <linux/pci.h> |
15 | #include <asm/vio.h> | |
1da177e4 | 16 | #include <asm/bug.h> |
5b6e9ff6 | 17 | #include <asm/machdep.h> |
1da177e4 | 18 | |
/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

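/*
 * For example (illustrative numbers, not a platform requirement): with a
 * per-device offset of 0x80000000, a buffer at physical address 0x1000 is
 * handed to the device as bus address 0x80001000. Every callback below
 * applies the same translation:
 *
 *	dma_addr = physical_addr + get_dma_offset(dev);
 */
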
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag,
				struct dma_attrs *attrs)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = __pa(ret) + get_dma_offset(dev);

	return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle,
			      struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

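/*
 * Map a coherent allocation into userspace. On cache-coherent hardware the
 * buffer is ordinary lowmem; on non-coherent hardware it lives in a special
 * uncached region, so the userspace mapping must be non-cached as well and
 * the pfn has to be looked up through the coherent allocator.
 */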
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     struct dma_attrs *attrs)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

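/*
 * Scatter/gather mapping is a straight address translation: there is no
 * IOMMU, so each entry maps 1:1 to its physical address plus the device
 * offset. On non-coherent hardware the pages are flushed or invalidated
 * according to the transfer direction.
 */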
86 | ||
78bdc310 | 87 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, |
3affedc4 MN |
88 | int nents, enum dma_data_direction direction, |
89 | struct dma_attrs *attrs) | |
1da177e4 | 90 | { |
78bdc310 | 91 | struct scatterlist *sg; |
12d04eef | 92 | int i; |
1da177e4 | 93 | |
78bdc310 | 94 | for_each_sg(sgl, sg, nents, i) { |
1cebd7a0 | 95 | sg->dma_address = sg_phys(sg) + get_dma_offset(dev); |
12d04eef | 96 | sg->dma_length = sg->length; |
2434bbb3 | 97 | __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); |
12d04eef | 98 | } |
5d33eebe | 99 | |
12d04eef | 100 | return nents; |
1da177e4 | 101 | } |
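/*
 * A direct mapping holds no resources, so unmapping is a no-op (the same
 * is true of dma_direct_unmap_page() below).
 */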
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

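/*
 * A device can use the direct ops only if its DMA mask covers the highest
 * bus address a direct mapping can produce, i.e. the end of DRAM shifted
 * by the device offset.
 */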
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

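/*
 * The required mask is an all-ones mask just wide enough to express the
 * highest bus address we can generate: take the top set bit of the end of
 * DRAM plus the device offset, then fill in every bit below it.
 */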
120 | ||
d24f9c69 MM |
121 | static u64 dma_direct_get_required_mask(struct device *dev) |
122 | { | |
123 | u64 end, mask; | |
124 | ||
125 | end = memblock_end_of_DRAM() + get_dma_offset(dev); | |
126 | ||
127 | mask = 1ULL << (fls64(end) - 1); | |
128 | mask += mask - 1; | |
129 | ||
130 | return mask; | |
131 | } | |
132 | ||
4fc665b8 BB |
133 | static inline dma_addr_t dma_direct_map_page(struct device *dev, |
134 | struct page *page, | |
135 | unsigned long offset, | |
136 | size_t size, | |
137 | enum dma_data_direction dir, | |
138 | struct dma_attrs *attrs) | |
139 | { | |
140 | BUG_ON(dir == DMA_NONE); | |
141 | __dma_sync_page(page, offset, size, dir); | |
1cebd7a0 | 142 | return page_to_phys(page) + offset + get_dma_offset(dev); |
4fc665b8 BB |
143 | } |
144 | ||
145 | static inline void dma_direct_unmap_page(struct device *dev, | |
146 | dma_addr_t dma_address, | |
147 | size_t size, | |
148 | enum dma_data_direction direction, | |
149 | struct dma_attrs *attrs) | |
150 | { | |
1da177e4 | 151 | } |
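/*
 * On non-coherent hardware, explicit cache maintenance is required around
 * CPU accesses to a mapped buffer. The single-buffer variant converts the
 * bus address back to a CPU virtual address with bus_to_virt(), which is
 * only valid because the mapping is direct.
 */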
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

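/*
 * The callbacks above are published as a struct dma_map_ops instance that
 * platform code attaches to devices on directly mapped busses (typically
 * via set_dma_ops()).
 */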
172 | ||
45223c54 | 173 | struct dma_map_ops dma_direct_ops = { |
bfbf7d61 AP |
174 | .alloc = dma_direct_alloc_coherent, |
175 | .free = dma_direct_free_coherent, | |
64ccc9c0 | 176 | .mmap = dma_direct_mmap_coherent, |
2eccacd0 MM |
177 | .map_sg = dma_direct_map_sg, |
178 | .unmap_sg = dma_direct_unmap_sg, | |
179 | .dma_supported = dma_direct_dma_supported, | |
180 | .map_page = dma_direct_map_page, | |
181 | .unmap_page = dma_direct_unmap_page, | |
182 | .get_required_mask = dma_direct_get_required_mask, | |
15e09c0e | 183 | #ifdef CONFIG_NOT_COHERENT_CACHE |
712d3e22 FT |
184 | .sync_single_for_cpu = dma_direct_sync_single, |
185 | .sync_single_for_device = dma_direct_sync_single, | |
15e09c0e BB |
186 | .sync_sg_for_cpu = dma_direct_sync_sg, |
187 | .sync_sg_for_device = dma_direct_sync_sg, | |
188 | #endif | |
12d04eef BH |
189 | }; |
190 | EXPORT_SYMBOL(dma_direct_ops); | |
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int __dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);
	return __dma_set_mask(dev, dma_mask);
}
EXPORT_SYMBOL(dma_set_mask);

212 | ||
6a5c7be5 MM |
213 | u64 dma_get_required_mask(struct device *dev) |
214 | { | |
215 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | |
6a5c7be5 MM |
216 | |
217 | if (ppc_md.dma_get_required_mask) | |
218 | return ppc_md.dma_get_required_mask(dev); | |
219 | ||
220 | if (unlikely(dma_ops == NULL)) | |
221 | return 0; | |
222 | ||
d24f9c69 MM |
223 | if (dma_ops->get_required_mask) |
224 | return dma_ops->get_required_mask(dev); | |
6a5c7be5 | 225 | |
d24f9c69 | 226 | return DMA_BIT_MASK(8 * sizeof(dma_addr_t)); |
6a5c7be5 MM |
227 | } |
228 | EXPORT_SYMBOL_GPL(dma_get_required_mask); | |
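/*
 * Register with the dma-debug facility early in boot and hook up the bus
 * types whose devices should be checked.
 */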
229 | ||
80d3e8ab FT |
230 | static int __init dma_init(void) |
231 | { | |
a9803497 AB |
232 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
233 | #ifdef CONFIG_PCI | |
234 | dma_debug_add_bus(&pci_bus_type); | |
235 | #endif | |
236 | #ifdef CONFIG_IBMVIO | |
237 | dma_debug_add_bus(&vio_bus_type); | |
238 | #endif | |
80d3e8ab FT |
239 | |
240 | return 0; | |
241 | } | |
242 | fs_initcall(dma_init); | |
6090912c | 243 |