#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern struct bus_type pci_bus_type;

/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);

#define dma_supported(dev, mask)	(1)
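
/*
 * Set the device's DMA mask. dma_supported() unconditionally reports
 * success on this port, so the only failure mode is a missing dma_mask
 * pointer.
 */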
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
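
/*
 * Allocate a coherent buffer. A machine vector may supply its own
 * consistent allocator; otherwise fall back to the generic
 * consistent_alloc() from arch/sh/mm/consistent.c.
 */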
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	if (sh_mv.mv_consistent_alloc) {
		void *ret;

		ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);
		if (ret != NULL)
			return ret;
	}

	return consistent_alloc(flag, size, dma_handle);
}
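
/*
 * Free a coherent buffer, again preferring the machine vector's hook
 * when it exists and reports success.
 */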
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	if (sh_mv.mv_consistent_free) {
		int ret;

		ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
		if (ret == 0)
			return;
	}

	consistent_free(vaddr, size);
}
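
/*
 * Noncoherent allocations simply alias the coherent ones, and all
 * mappings are treated as consistent.
 */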
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
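
/* Perform cache maintenance (via consistent_sync()) on a virtually
 * addressed range. */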
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	consistent_sync(vaddr, size, (int)dir);
}
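
/*
 * Map a single buffer. DMA addresses are a direct physical mapping on
 * SH; coherent PCI configurations can skip the cache sync entirely.
 */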
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}
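
/* Unmapping is a no-op; there are no mapping resources to release. */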
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
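
/*
 * Map a scatterlist by syncing each entry (when the bus is not
 * coherent) and recording its physical address.
 */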
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}

	return nents;
}
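
/* As with dma_unmap_single(), scatterlist unmap is a no-op. */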
#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
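
/* Page-based mappings are expressed in terms of the single-buffer ops. */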
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}
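
/*
 * Sync a mapped region with the CPU cache. Coherent PCI configurations
 * need no maintenance; everything else goes through dma_cache_sync().
 */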
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}
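
/* Scatterlist variant of the sync operations above. */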
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}
}
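
/*
 * The for_cpu/for_device pairs map onto the same cache maintenance on
 * this port, so both directions share the helpers above.
 */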
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}
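
/* Report the cache line size so callers can align noncoherent buffers. */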
static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}
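
/*
 * DMA addresses are physical addresses here, so only a zero handle is
 * treated as an error.
 */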
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
#endif /* __ASM_SH_DMA_MAPPING_H */