/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/slab.h>

struct cma;
struct iommu_ops;

struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages_op)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
			enum dma_data_direction dir, gfp_t gfp,
			unsigned long attrs);
	void (*free_noncontiguous)(struct device *dev, size_t size,
			struct sg_table *sgt, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg should return a negative error code on error. See
	 * dma_map_sgtable() for a list of appropriate error codes
	 * and their meanings.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	size_t (*opt_mapping_size)(void);
	unsigned long (*get_merge_boundary)(struct device *dev);
};

#ifdef CONFIG_ARCH_HAS_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops();
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_ARCH_HAS_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_OPS */
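
/*
 * Illustrative sketch only (not part of this header's API): a bus or IOMMU
 * layer that supplies its own dma_map_ops typically fills in the callbacks
 * it supports and installs them with set_dma_ops().  All names below
 * (my_bus_dma_ops, my_bus_map_page, ...) are hypothetical.
 *
 *	static const struct dma_map_ops my_bus_dma_ops = {
 *		.alloc		= my_bus_alloc,
 *		.free		= my_bus_free,
 *		.map_page	= my_bus_map_page,
 *		.unmap_page	= my_bus_unmap_page,
 *	};
 *
 *	static void my_bus_setup_device(struct device *dev)
 *	{
 *		set_dma_ops(dev, &my_bus_dma_ops);
 *	}
 *
 * The DMA core dispatches through get_dma_ops(); a NULL return means the
 * dma-direct path is used instead of an ops callback.
 */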

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
		unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
		int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
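
/*
 * Illustrative sketch only: an allocator such as dma-direct roughly prefers
 * a per-device or global CMA area via dma_alloc_contiguous() and falls back
 * to the page allocator when CMA has nothing to offer.  Error handling and
 * GFP details are omitted; my_alloc_pages() is a hypothetical helper.
 *
 *	static struct page *my_alloc_pages(struct device *dev, size_t size,
 *			gfp_t gfp)
 *	{
 *		struct page *page = dma_alloc_contiguous(dev, size, gfp);
 *
 *		if (!page)
 *			page = alloc_pages(gfp, get_order(size));
 *		return page;
 *	}
 *
 * Pages obtained this way are returned with dma_free_contiguous(), which
 * also copes with the non-CMA fallback above.
 */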

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void dma_release_coherent_memory(struct device *dev) { }
#endif /* CONFIG_DMA_DECLARE_COHERENT */

#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * This is the actual return value from the ->alloc_noncontiguous method.
 * The users of the DMA API should only care about the sg_table, but to make
 * the DMA-API internal vmapping and freeing easier we stash away the page
 * array as well (except for the fallback case). This can go away any time,
 * e.g. when a vmap-variant that takes a scatterlist comes along.
 */
struct dma_sgt_handle {
	struct sg_table sgt;
	struct page **pages;
};
#define sgt_handle(sgt) \
	container_of((sgt), struct dma_sgt_handle, sgt)

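/*
 * Illustrative sketch only: an ->alloc_noncontiguous implementation is
 * expected to return the sg_table embedded in a struct dma_sgt_handle so
 * that the free path can recover the handle (and the stashed page array)
 * with sgt_handle().  my_alloc_noncontiguous() and my_free_noncontiguous()
 * are hypothetical.
 *
 *	static struct sg_table *my_alloc_noncontiguous(struct device *dev,
 *			size_t size, enum dma_data_direction dir, gfp_t gfp,
 *			unsigned long attrs)
 *	{
 *		struct dma_sgt_handle *sh = kmalloc(sizeof(*sh), gfp);
 *
 *		if (!sh)
 *			return NULL;
 *		...allocate sh->pages, build and map sh->sgt for @dir...
 *		return &sh->sgt;
 *	}
 *
 *	static void my_free_noncontiguous(struct device *dev, size_t size,
 *			struct sg_table *sgt, enum dma_data_direction dir)
 *	{
 *		struct dma_sgt_handle *sh = sgt_handle(sgt);
 *
 *		...unmap and free sh->pages...
 *		kfree(sh);
 *	}
 */
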
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
#define dma_default_coherent true

static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */

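/*
 * Illustrative sketch only: non-coherent streaming-DMA paths typically gate
 * the architecture cache-maintenance hooks declared further below on
 * dev_is_dma_coherent().  Roughly (my_sync_for_device() is a hypothetical
 * wrapper, paddr the buffer's physical address):
 *
 *	static void my_sync_for_device(struct device *dev, phys_addr_t paddr,
 *			size_t size, enum dma_data_direction dir)
 *	{
 *		if (!dev_is_dma_coherent(dev))
 *			arch_sync_dma_for_device(paddr, size, dir);
 *	}
 *
 * The matching CPU-side sync calls arch_sync_dma_for_cpu() under the same
 * condition.
 */
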
static inline void dma_reset_need_sync(struct device *dev)
{
#ifdef CONFIG_DMA_NEED_SYNC
	/* Reset it only once so that the function can be called on a hot path */
	if (unlikely(dev->dma_skip_sync))
		dev->dma_skip_sync = false;
#endif
}

/*
 * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
 */
static inline bool dma_kmalloc_safe(struct device *dev,
				    enum dma_data_direction dir)
{
	/*
	 * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
	 * caches have already been aligned to a DMA-safe size.
	 */
	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
		return true;

	/*
	 * kmalloc() buffers are DMA-safe irrespective of size if the device
	 * is coherent or the direction is DMA_TO_DEVICE (non-destructive
	 * cache maintenance and benign cache line evictions).
	 */
	if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
		return true;

	return false;
}

/*
 * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
 * sufficiently aligned for non-coherent DMA.
 */
static inline bool dma_kmalloc_size_aligned(size_t size)
{
	/*
	 * Larger kmalloc() sizes are guaranteed to be aligned to
	 * ARCH_DMA_MINALIGN.
	 */
	if (size >= 2 * ARCH_DMA_MINALIGN ||
	    IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
		return true;

	return false;
}

/*
 * Check whether the given object size may have originated from a kmalloc()
 * buffer with a slab alignment below the DMA-safe alignment and needs
 * bouncing for non-coherent DMA. The pointer alignment is not considered and
 * in-structure DMA-safe offsets are the responsibility of the caller. Such
 * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
 *
 * The heuristics can have false positives, bouncing unnecessarily, though the
 * buffers would be small. False negatives are theoretically possible if, for
 * example, multiple small kmalloc() buffers are coalesced into a larger
 * buffer that passes the alignment check. There are no such known constructs
 * in the kernel.
 */
static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
					    enum dma_data_direction dir)
{
	return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
}

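/*
 * Illustrative sketch only: a streaming-mapping path that supports
 * non-coherent devices can use dma_kmalloc_needs_bounce() to decide whether
 * a small kmalloc() buffer has to be bounced (e.g. through SWIOTLB) rather
 * than mapped in place.  my_map_phys() and my_bounce_map() are hypothetical.
 *
 *	static dma_addr_t my_map_phys(struct device *dev, phys_addr_t phys,
 *			size_t size, enum dma_data_direction dir)
 *	{
 *		if (dma_kmalloc_needs_bounce(dev, size, dir))
 *			return my_bounce_map(dev, phys, size, dir);
 *		return phys_to_dma(dev, phys);
 *	}
 */
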
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently. We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
#else
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(const struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(const struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

enum pci_p2pdma_map_type {
	/*
	 * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
	 * type hasn't been calculated yet. Functions that return this enum
	 * never return this value.
	 */
	PCI_P2PDMA_MAP_UNKNOWN = 0,

	/*
	 * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
	 * traverse the host bridge and the host bridge is not in the
	 * allowlist. DMA mapping routines should return an error when
	 * this is returned.
	 */
	PCI_P2PDMA_MAP_NOT_SUPPORTED,

	/*
	 * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
	 * each other directly through a PCI switch and the transaction will
	 * not traverse the host bridge. Such a mapping should program
	 * the DMA engine with PCI bus addresses.
	 */
	PCI_P2PDMA_MAP_BUS_ADDR,

	/*
	 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
	 * to each other, but the transaction traverses a host bridge on the
	 * allowlist. In this case, a normal mapping either with CPU physical
	 * addresses (in the case of dma-direct) or IOVA addresses (in the
	 * case of IOMMUs) should be used to program the DMA engine.
	 */
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma_map_state {
	struct dev_pagemap *pgmap;
	int map;
	u64 bus_off;
};

#ifdef CONFIG_PCI_P2PDMA
enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg);
#else /* CONFIG_PCI_P2PDMA */
static inline enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg)
{
	return PCI_P2PDMA_MAP_NOT_SUPPORTED;
}
#endif /* CONFIG_PCI_P2PDMA */

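/*
 * Illustrative sketch only: a scatterlist mapping loop that has to cope with
 * P2PDMA pages roughly consults pci_p2pdma_map_segment() per segment: a
 * segment already translated to a PCI bus address is skipped, a
 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE segment falls through to the normal
 * mapping path, and anything else fails the whole mapping.  my_map_sg() is a
 * hypothetical, heavily simplified implementation.
 *
 *	static int my_map_sg(struct device *dev, struct scatterlist *sgl,
 *			int nents, enum dma_data_direction dir)
 *	{
 *		struct pci_p2pdma_map_state p2pdma_state = {};
 *		struct scatterlist *sg;
 *		int i;
 *
 *		for_each_sg(sgl, sg, nents, i) {
 *			switch (pci_p2pdma_map_segment(&p2pdma_state, dev, sg)) {
 *			case PCI_P2PDMA_MAP_BUS_ADDR:
 *				continue;
 *			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
 *				break;
 *			default:
 *				return -EREMOTEIO;
 *			}
 *			...map the segment via the normal direct or IOMMU path...
 *		}
 *		return nents;
 *	}
 */
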
#endif /* _LINUX_DMA_MAP_OPS_H */