Commit | Line | Data |
---|---|---|
09b55412 CM |
1 | /* |
2 | * Copyright (C) 2012 ARM Ltd. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License version 2 as | |
6 | * published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License | |
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
15 | */ | |
16 | #ifndef __ASM_DMA_MAPPING_H | |
17 | #define __ASM_DMA_MAPPING_H | |
18 | ||
19 | #ifdef __KERNEL__ | |
20 | ||
21 | #include <linux/types.h> | |
22 | #include <linux/vmalloc.h> | |
23 | ||
24 | #include <asm-generic/dma-coherent.h> | |
25 | ||
58c8b269 SS |
26 | #include <xen/xen.h> |
27 | #include <asm/xen/hypervisor.h> | |
28 | ||
25b719d7 | 29 | #define DMA_ERROR_CODE (~(dma_addr_t)0) |
09b55412 | 30 | extern struct dma_map_ops *dma_ops; |
7363590d CM |
31 | extern struct dma_map_ops coherent_swiotlb_dma_ops; |
32 | extern struct dma_map_ops noncoherent_swiotlb_dma_ops; | |
09b55412 | 33 | |
58c8b269 | 34 | static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) |
09b55412 CM |
35 | { |
36 | if (unlikely(!dev) || !dev->archdata.dma_ops) | |
37 | return dma_ops; | |
38 | else | |
39 | return dev->archdata.dma_ops; | |
40 | } | |
41 | ||
58c8b269 SS |
42 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
43 | { | |
44 | if (xen_initial_domain()) | |
45 | return xen_dma_ops; | |
46 | else | |
47 | return __generic_dma_ops(dev); | |
48 | } | |
49 | ||
7363590d CM |
50 | static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) |
51 | { | |
52 | dev->archdata.dma_ops = ops; | |
53 | } | |
54 | ||
09b55412 CM |
55 | #include <asm-generic/dma-mapping-common.h> |
56 | ||
57 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | |
58 | { | |
59 | return (dma_addr_t)paddr; | |
60 | } | |
61 | ||
62 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) | |
63 | { | |
64 | return (phys_addr_t)dev_addr; | |
65 | } | |
66 | ||
67 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr) | |
68 | { | |
69 | struct dma_map_ops *ops = get_dma_ops(dev); | |
bb7c4deb | 70 | debug_dma_mapping_error(dev, dev_addr); |
09b55412 CM |
71 | return ops->mapping_error(dev, dev_addr); |
72 | } | |
73 | ||
74 | static inline int dma_supported(struct device *dev, u64 mask) | |
75 | { | |
76 | struct dma_map_ops *ops = get_dma_ops(dev); | |
77 | return ops->dma_supported(dev, mask); | |
78 | } | |
79 | ||
80 | static inline int dma_set_mask(struct device *dev, u64 mask) | |
81 | { | |
82 | if (!dev->dma_mask || !dma_supported(dev, mask)) | |
83 | return -EIO; | |
84 | *dev->dma_mask = mask; | |
85 | ||
86 | return 0; | |
87 | } | |
88 | ||
89 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | |
90 | { | |
91 | if (!dev->dma_mask) | |
92 | return 0; | |
93 | ||
94 | return addr + size - 1 <= *dev->dma_mask; | |
95 | } | |
96 | ||
/* Intentionally a no-op on this architecture. */
static inline void dma_mark_clean(void *addr, size_t size)
{
}
100 | ||
d25749af DHG |
101 | #define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) |
102 | #define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL) | |
103 | ||
104 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | |
105 | dma_addr_t *dma_handle, gfp_t flags, | |
106 | struct dma_attrs *attrs) | |
09b55412 CM |
107 | { |
108 | struct dma_map_ops *ops = get_dma_ops(dev); | |
109 | void *vaddr; | |
110 | ||
111 | if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr)) | |
112 | return vaddr; | |
113 | ||
d25749af | 114 | vaddr = ops->alloc(dev, size, dma_handle, flags, attrs); |
09b55412 CM |
115 | debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr); |
116 | return vaddr; | |
117 | } | |
118 | ||
d25749af DHG |
119 | static inline void dma_free_attrs(struct device *dev, size_t size, |
120 | void *vaddr, dma_addr_t dev_addr, | |
121 | struct dma_attrs *attrs) | |
09b55412 CM |
122 | { |
123 | struct dma_map_ops *ops = get_dma_ops(dev); | |
124 | ||
125 | if (dma_release_from_coherent(dev, get_order(size), vaddr)) | |
126 | return; | |
127 | ||
128 | debug_dma_free_coherent(dev, size, vaddr, dev_addr); | |
d25749af | 129 | ops->free(dev, size, vaddr, dev_addr, attrs); |
09b55412 CM |
130 | } |
131 | ||
132 | /* | |
133 | * There is no dma_cache_sync() implementation, so just return NULL here. | |
134 | */ | |
135 | static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, | |
136 | dma_addr_t *handle, gfp_t flags) | |
137 | { | |
138 | return NULL; | |
139 | } | |
140 | ||
141 | static inline void dma_free_noncoherent(struct device *dev, size_t size, | |
142 | void *cpu_addr, dma_addr_t handle) | |
143 | { | |
144 | } | |
145 | ||
146 | #endif /* __KERNEL__ */ | |
147 | #endif /* __ASM_DMA_MAPPING_H */ |