/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>

struct cma;

/*
 * Values for struct dma_map_ops.flags:
 *
 * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
 * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
 */
#define DMA_F_PCI_P2PDMA_SUPPORTED     (1 << 0)

struct dma_map_ops {
        unsigned int flags;

        void *(*alloc)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t gfp,
                        unsigned long attrs);
        void (*free)(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle, unsigned long attrs);
        struct page *(*alloc_pages)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, enum dma_data_direction dir,
                        gfp_t gfp);
        void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
                        dma_addr_t dma_handle, enum dma_data_direction dir);
        struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
                        enum dma_data_direction dir, gfp_t gfp,
                        unsigned long attrs);
        void (*free_noncontiguous)(struct device *dev, size_t size,
                        struct sg_table *sgt, enum dma_data_direction dir);
        int (*mmap)(struct device *, struct vm_area_struct *,
                        void *, dma_addr_t, size_t, unsigned long attrs);

        int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        unsigned long attrs);

        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction dir, unsigned long attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        /*
         * map_sg should return a negative error code on error. See
         * dma_map_sgtable() for a list of appropriate error codes
         * and their meanings.
         */
        int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
                        enum dma_data_direction dir, unsigned long attrs);
        void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
                        enum dma_data_direction dir, unsigned long attrs);
        dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev,
                        dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir);
        void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
                        enum dma_data_direction direction);
        int (*dma_supported)(struct device *dev, u64 mask);
        u64 (*get_required_mask)(struct device *dev);
        size_t (*max_mapping_size)(struct device *dev);
        size_t (*opt_mapping_size)(void);
        unsigned long (*get_merge_boundary)(struct device *dev);
};
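
/*
 * Example: a minimal ops table as a backend might define one. All foo_*
 * names below are hypothetical, for illustration only; methods an
 * implementation does not provide are treated by the core as absent
 * (skipped or unsupported, depending on the call):
 *
 *	static const struct dma_map_ops foo_dma_ops = {
 *		.alloc		= foo_alloc,
 *		.free		= foo_free,
 *		.map_page	= foo_map_page,
 *		.unmap_page	= foo_unmap_page,
 *		.map_sg		= foo_map_sg,
 *		.unmap_sg	= foo_unmap_sg,
 *	};
 */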

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (dev->dma_ops)
                return dev->dma_ops;
        return get_arch_dma_ops();
}

static inline void set_dma_ops(struct device *dev,
                               const struct dma_map_ops *dma_ops)
{
        dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        return NULL;
}
static inline void set_dma_ops(struct device *dev,
                               const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */

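/*
 * set_dma_ops() is meant for bus or IOMMU setup code, not for drivers:
 * install the ops before the device is first used for DMA, e.g. with the
 * hypothetical ops table from above:
 *
 *	set_dma_ops(dev, &foo_dma_ops);
 *
 * With dev->dma_ops left NULL, get_dma_ops() falls back to
 * get_arch_dma_ops(), which on most architectures is NULL and selects the
 * dma-direct path.
 */
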
#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
        if (dev && dev->cma_area)
                return dev->cma_area;
        return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
                phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
        return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
                phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
                bool fixed)
{
        return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
                size_t count, unsigned int order, bool no_warn)
{
        return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
                struct page *pages, int count)
{
        return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
                gfp_t gfp)
{
        return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
                size_t size)
{
        __free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
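
/*
 * Sketch of how a ->alloc implementation typically pairs these helpers
 * (error handling omitted; this mirrors the dma-direct usage pattern):
 *
 *	struct page *page = dma_alloc_contiguous(dev, size, gfp);
 *
 *	if (!page)
 *		page = alloc_pages(gfp, get_order(size));
 *	...
 *	dma_free_contiguous(dev, page, size);
 *
 * dma_free_contiguous() hands pages that did not come from CMA back to the
 * page allocator, so the alloc/free pair stays symmetric either way.
 */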

#ifdef CONFIG_DMA_PERNUMA_CMA
void dma_pernuma_cma_reserve(void);
#else
static inline void dma_pernuma_cma_reserve(void) { }
#endif /* CONFIG_DMA_PERNUMA_CMA */

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
                phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
        return -ENOSYS;
}

#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void dma_release_coherent_memory(struct device *dev) { }
#endif /* CONFIG_DMA_DECLARE_COHERENT */

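/*
 * dma_declare_coherent_memory() lets platform code dedicate a memory
 * region (e.g. device-local SRAM) to one device; subsequent
 * dma_alloc_coherent() calls for that device are then served from it.
 * A hypothetical call, assuming a struct resource *res describing the
 * region and equal CPU and device addresses:
 *
 *	dma_declare_coherent_memory(dev, res->start, res->start,
 *				    resource_size(res));
 */
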
#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
                size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
                ssize_t size, dma_addr_t *dma_handle)
{
        return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
        return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
                void *cpu_addr, size_t size, int *ret)
{
        return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * This is the actual return value from the ->alloc_noncontiguous method.
 * The users of the DMA API should only care about the sg_table, but to make
 * the DMA-API internal vmapping and freeing easier we stash away the page
 * array as well (except for the fallback case). This can go away any time,
 * e.g. when a vmap variant that takes a scatterlist comes along.
 */
struct dma_sgt_handle {
        struct sg_table sgt;
        struct page **pages;
};
#define sgt_handle(sgt) \
        container_of((sgt), struct dma_sgt_handle, sgt)

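/*
 * A ->free_noncontiguous implementation can recover the handle from the
 * sg_table pointer the core hands back (sketch of the non-fallback case,
 * where the page array was stashed at allocation time):
 *
 *	struct dma_sgt_handle *sh = sgt_handle(sgt);
 *
 *	... unmap and free the pages in sh->pages ...
 *	sg_free_table(&sh->sgt);
 *	kfree(sh);
 */
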
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
                dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
                const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
                const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
                void **cpu_addr, gfp_t flags,
                bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
                dma_addr_t dma_start, u64 size);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
        defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
        defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
        return dev->dma_coherent;
}
#else
#define dma_default_coherent true

static inline bool dev_is_dma_coherent(struct device *dev)
{
        return true;
}
#endif /* !CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently. We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)        pgprot_noncached(prot)
#endif
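
/*
 * Architectures with weaker requirements can provide their own definition
 * ahead of this fallback (via <asm/pgtable.h>); for instance, an arch
 * might relax it to write-combining (illustrative only, not a real
 * in-tree definition):
 *
 *	#define pgprot_dmacoherent(prot)	pgprot_writecombine(prot)
 */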

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
                unsigned long attrs)
{
        return prot;    /* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
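
/*
 * On a non-coherent architecture these hooks usually resolve to cache
 * maintenance keyed on the transfer direction. A rough sketch, with
 * cache_wb_range()/cache_inv_range() as hypothetical arch helpers:
 *
 *	void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 *			enum dma_data_direction dir)
 *	{
 *		if (dir == DMA_FROM_DEVICE)
 *			cache_inv_range(paddr, size);
 *		else
 *			cache_wb_range(paddr, size);
 *	}
 */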

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
                int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
                int nents);
#else
#define arch_dma_map_page_direct(d, a)          (false)
#define arch_dma_unmap_page_direct(d, a)        (false)
#define arch_dma_map_sg_direct(d, s, n)         (false)
#define arch_dma_unmap_sg_direct(d, s, n)       (false)
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

enum pci_p2pdma_map_type {
        /*
         * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
         * type hasn't been calculated yet. Functions that return this enum
         * never return this value.
         */
        PCI_P2PDMA_MAP_UNKNOWN = 0,

        /*
         * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
         * traverse the host bridge and the host bridge is not in the
         * allowlist. DMA mapping routines should return an error when
         * this is returned.
         */
        PCI_P2PDMA_MAP_NOT_SUPPORTED,

        /*
         * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
         * each other directly through a PCI switch and the transaction will
         * not traverse the host bridge. Such a mapping should program
         * the DMA engine with PCI bus addresses.
         */
        PCI_P2PDMA_MAP_BUS_ADDR,

        /*
         * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
         * to each other, but the transaction traverses a host bridge on the
         * allowlist. In this case, a normal mapping either with CPU physical
         * addresses (in the case of dma-direct) or IOVA addresses (in the
         * case of IOMMUs) should be used to program the DMA engine.
         */
        PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma_map_state {
        struct dev_pagemap *pgmap;
        int map;
        u64 bus_off;
};
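
/*
 * A P2PDMA-aware ->map_sg keeps one map state across the scatterlist walk
 * and branches on the returned type; a sketch following the pattern used
 * by dma-direct:
 *
 *	struct pci_p2pdma_map_state p2pdma_state = {};
 *
 *	for_each_sg(sgl, sg, nents, i) {
 *		if (is_pci_p2pdma_page(sg_page(sg))) {
 *			switch (pci_p2pdma_map_segment(&p2pdma_state, dev,
 *						       sg)) {
 *			case PCI_P2PDMA_MAP_BUS_ADDR:
 *				continue;	// already mapped as a bus address
 *			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
 *				break;		// map normally below
 *			default:
 *				return -EREMOTEIO;
 *			}
 *		}
 *		... map the segment as usual ...
 *	}
 */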

#ifdef CONFIG_PCI_P2PDMA
enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
                       struct scatterlist *sg);
#else /* CONFIG_PCI_P2PDMA */
static inline enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
                       struct scatterlist *sg)
{
        return PCI_P2PDMA_MAP_NOT_SUPPORTED;
}
#endif /* CONFIG_PCI_P2PDMA */

#endif /* _LINUX_DMA_MAP_OPS_H */