/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Rewrite, cleanup:
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
20 | ||
21 | #ifndef _ASM_IOMMU_H | |
22 | #define _ASM_IOMMU_H | |
88ced031 | 23 | #ifdef __KERNEL__ |
9b6b563c | 24 | |
5d2efba6 | 25 | #include <linux/compiler.h> |
9b6b563c PM |
26 | #include <linux/spinlock.h> |
27 | #include <linux/device.h> | |
28 | #include <linux/dma-mapping.h> | |
1977f032 | 29 | #include <linux/bitops.h> |
7e11580b | 30 | #include <asm/machdep.h> |
5d2efba6 | 31 | #include <asm/types.h> |
798248a3 | 32 | #include <asm/pci-bridge.h> |
5d2efba6 | 33 | |
e589a440 AP |
34 | #define IOMMU_PAGE_SHIFT_4K 12 |
35 | #define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K) | |
36 | #define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1)) | |
37 | #define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K) | |
5d2efba6 | 38 | |
d0847757 AP |
39 | #define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift) |
40 | #define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1)) | |
41 | #define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr)) | |
42 | ||
165785e5 JK |
43 | /* Boot time flags */ |
44 | extern int iommu_is_off; | |
45 | extern int iommu_force_on; | |
5d2efba6 | 46 | |
da004c36 | 47 | struct iommu_table_ops { |
05c6cfb9 AK |
48 | /* |
49 | * When called with direction==DMA_NONE, it is equal to clear(). | |
50 | * uaddr is a linear map address. | |
51 | */ | |
da004c36 AK |
52 | int (*set)(struct iommu_table *tbl, |
53 | long index, long npages, | |
54 | unsigned long uaddr, | |
55 | enum dma_data_direction direction, | |
00085f1e | 56 | unsigned long attrs); |
05c6cfb9 AK |
57 | #ifdef CONFIG_IOMMU_API |
58 | /* | |
59 | * Exchanges existing TCE with new TCE plus direction bits; | |
60 | * returns old TCE and DMA direction mask. | |
61 | * @tce is a physical address. | |
62 | */ | |
63 | int (*exchange)(struct iommu_table *tbl, | |
64 | long index, | |
65 | unsigned long *hpa, | |
66 | enum dma_data_direction *direction); | |
a540aa56 AK |
67 | /* Real mode */ |
68 | int (*exchange_rm)(struct iommu_table *tbl, | |
69 | long index, | |
70 | unsigned long *hpa, | |
71 | enum dma_data_direction *direction); | |
05c6cfb9 | 72 | #endif |
da004c36 AK |
73 | void (*clear)(struct iommu_table *tbl, |
74 | long index, long npages); | |
05c6cfb9 | 75 | /* get() returns a physical address */ |
da004c36 AK |
76 | unsigned long (*get)(struct iommu_table *tbl, long index); |
77 | void (*flush)(struct iommu_table *tbl); | |
4793d65d | 78 | void (*free)(struct iommu_table *tbl); |
da004c36 AK |
79 | }; |
80 | ||
81 | /* These are used by VIO */ | |
82 | extern struct iommu_table_ops iommu_table_lpar_multi_ops; | |
83 | extern struct iommu_table_ops iommu_table_pseries_ops; | |
84 | ||
9b6b563c PM |
85 | /* |
86 | * IOMAP_MAX_ORDER defines the largest contiguous block | |
87 | * of dma space we can get. IOMAP_MAX_ORDER = 13 | |
88 | * allows up to 2**12 pages (4096 * 4096) = 16 MB | |
89 | */ | |
5d2efba6 | 90 | #define IOMAP_MAX_ORDER 13 |
9b6b563c | 91 | |
b4c3a872 AB |
92 | #define IOMMU_POOL_HASHBITS 2 |
93 | #define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS) | |
94 | ||
95 | struct iommu_pool { | |
96 | unsigned long start; | |
97 | unsigned long end; | |
98 | unsigned long hint; | |
99 | spinlock_t lock; | |
100 | } ____cacheline_aligned_in_smp; | |
101 | ||
9b6b563c PM |
102 | struct iommu_table { |
103 | unsigned long it_busno; /* Bus number this table belongs to */ | |
104 | unsigned long it_size; /* Size of iommu table in entries */ | |
bbb845c4 AK |
105 | unsigned long it_indirect_levels; |
106 | unsigned long it_level_size; | |
00547193 | 107 | unsigned long it_allocated_size; |
9b6b563c PM |
108 | unsigned long it_offset; /* Offset into global table */ |
109 | unsigned long it_base; /* mapped address of tce table */ | |
110 | unsigned long it_index; /* which iommu table this is */ | |
111 | unsigned long it_type; /* type: PCI or Virtual Bus */ | |
112 | unsigned long it_blocksize; /* Entries in each block (cacheline) */ | |
b4c3a872 AB |
113 | unsigned long poolsize; |
114 | unsigned long nr_pools; | |
115 | struct iommu_pool large_pool; | |
116 | struct iommu_pool pools[IOMMU_NR_POOLS]; | |
9b6b563c | 117 | unsigned long *it_map; /* A simple allocation bitmap for now */ |
3a553170 | 118 | unsigned long it_page_shift;/* table iommu page size */ |
0eaf4def | 119 | struct list_head it_group_list;/* List of iommu_table_group_link */ |
2157e7b8 | 120 | unsigned long *it_userspace; /* userspace view of the table */ |
da004c36 | 121 | struct iommu_table_ops *it_ops; |
e5afdf9d | 122 | struct kref it_kref; |
9b6b563c PM |
123 | }; |
124 | ||
2157e7b8 AK |
125 | #define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \ |
126 | ((tbl)->it_userspace ? \ | |
127 | &((tbl)->it_userspace[(entry) - (tbl)->it_offset]) : \ | |
128 | NULL) | |
129 | ||
d0847757 AP |
130 | /* Pure 2^n version of get_order */ |
131 | static inline __attribute_const__ | |
132 | int get_iommu_order(unsigned long size, struct iommu_table *tbl) | |
133 | { | |
134 | return __ilog2((size - 1) >> tbl->it_page_shift) + 1; | |
135 | } | |
136 | ||
137 | ||
9b6b563c | 138 | struct scatterlist; |
9b6b563c | 139 | |
2db4928b BH |
140 | #ifdef CONFIG_PPC64 |
141 | ||
6009faa4 CH |
142 | #define IOMMU_MAPPING_ERROR (~(dma_addr_t)0x0) |
143 | ||
2db4928b BH |
144 | static inline void set_iommu_table_base(struct device *dev, |
145 | struct iommu_table *base) | |
738ef42e | 146 | { |
2db4928b | 147 | dev->archdata.iommu_table_base = base; |
738ef42e BB |
148 | } |
149 | ||
150 | static inline void *get_iommu_table_base(struct device *dev) | |
151 | { | |
2db4928b | 152 | return dev->archdata.iommu_table_base; |
738ef42e BB |
153 | } |
154 | ||
2db4928b BH |
155 | extern int dma_iommu_dma_supported(struct device *dev, u64 mask); |
156 | ||
e5afdf9d AK |
157 | extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl); |
158 | extern int iommu_tce_table_put(struct iommu_table *tbl); | |
9b6b563c | 159 | |
9b6b563c PM |
160 | /* Initializes an iommu_table based in values set in the passed-in |
161 | * structure | |
162 | */ | |
ca1588e7 AB |
163 | extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, |
164 | int nid); | |
e633bc86 | 165 | #define IOMMU_TABLE_GROUP_MAX_TABLES 2 |
b348aa65 | 166 | |
f87a8864 AK |
167 | struct iommu_table_group; |
168 | ||
169 | struct iommu_table_group_ops { | |
00547193 AK |
170 | unsigned long (*get_table_size)( |
171 | __u32 page_shift, | |
172 | __u64 window_size, | |
173 | __u32 levels); | |
4793d65d AK |
174 | long (*create_table)(struct iommu_table_group *table_group, |
175 | int num, | |
176 | __u32 page_shift, | |
177 | __u64 window_size, | |
178 | __u32 levels, | |
179 | struct iommu_table **ptbl); | |
180 | long (*set_window)(struct iommu_table_group *table_group, | |
181 | int num, | |
182 | struct iommu_table *tblnew); | |
183 | long (*unset_window)(struct iommu_table_group *table_group, | |
184 | int num); | |
f87a8864 AK |
185 | /* Switch ownership from platform code to external user (e.g. VFIO) */ |
186 | void (*take_ownership)(struct iommu_table_group *table_group); | |
187 | /* Switch ownership from external user (e.g. VFIO) back to core */ | |
188 | void (*release_ownership)(struct iommu_table_group *table_group); | |
189 | }; | |
190 | ||
0eaf4def AK |
191 | struct iommu_table_group_link { |
192 | struct list_head next; | |
193 | struct rcu_head rcu; | |
194 | struct iommu_table_group *table_group; | |
195 | }; | |
196 | ||
b348aa65 | 197 | struct iommu_table_group { |
4793d65d AK |
198 | /* IOMMU properties */ |
199 | __u32 tce32_start; | |
200 | __u32 tce32_size; | |
201 | __u64 pgsizes; /* Bitmap of supported page sizes */ | |
202 | __u32 max_dynamic_windows_supported; | |
203 | __u32 max_levels; | |
204 | ||
b348aa65 AK |
205 | struct iommu_group *group; |
206 | struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES]; | |
f87a8864 | 207 | struct iommu_table_group_ops *ops; |
b348aa65 AK |
208 | }; |
209 | ||
d905c5df | 210 | #ifdef CONFIG_IOMMU_API |
b348aa65 AK |
211 | |
212 | extern void iommu_register_group(struct iommu_table_group *table_group, | |
4e13c1ac | 213 | int pci_domain_number, unsigned long pe_num); |
d905c5df AK |
214 | extern int iommu_add_device(struct device *dev); |
215 | extern void iommu_del_device(struct device *dev); | |
4ad04e59 | 216 | extern int __init tce_iommu_bus_notifier_init(void); |
05c6cfb9 AK |
217 | extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, |
218 | unsigned long *hpa, enum dma_data_direction *direction); | |
a540aa56 AK |
219 | extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry, |
220 | unsigned long *hpa, enum dma_data_direction *direction); | |
d905c5df | 221 | #else |
b348aa65 | 222 | static inline void iommu_register_group(struct iommu_table_group *table_group, |
d905c5df AK |
223 | int pci_domain_number, |
224 | unsigned long pe_num) | |
225 | { | |
226 | } | |
227 | ||
228 | static inline int iommu_add_device(struct device *dev) | |
229 | { | |
230 | return 0; | |
231 | } | |
232 | ||
233 | static inline void iommu_del_device(struct device *dev) | |
234 | { | |
235 | } | |
4ad04e59 NA |
236 | |
237 | static inline int __init tce_iommu_bus_notifier_init(void) | |
238 | { | |
239 | return 0; | |
240 | } | |
d905c5df AK |
241 | #endif /* !CONFIG_IOMMU_API */ |
242 | ||
6009faa4 CH |
243 | int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr); |
244 | ||
2db4928b BH |
245 | #else |
246 | ||
247 | static inline void *get_iommu_table_base(struct device *dev) | |
248 | { | |
249 | return NULL; | |
250 | } | |
251 | ||
252 | static inline int dma_iommu_dma_supported(struct device *dev, u64 mask) | |
253 | { | |
254 | return 0; | |
255 | } | |
256 | ||
257 | #endif /* CONFIG_PPC64 */ | |
258 | ||
0690cbd2 JR |
259 | extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, |
260 | struct scatterlist *sglist, int nelems, | |
261 | unsigned long mask, | |
262 | enum dma_data_direction direction, | |
00085f1e | 263 | unsigned long attrs); |
0690cbd2 JR |
264 | extern void ppc_iommu_unmap_sg(struct iommu_table *tbl, |
265 | struct scatterlist *sglist, | |
266 | int nelems, | |
267 | enum dma_data_direction direction, | |
00085f1e | 268 | unsigned long attrs); |
9b6b563c | 269 | |
fb3475e9 FT |
270 | extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, |
271 | size_t size, dma_addr_t *dma_handle, | |
272 | unsigned long mask, gfp_t flag, int node); | |
9b6b563c | 273 | extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, |
12d04eef | 274 | void *vaddr, dma_addr_t dma_handle); |
f9226d57 MN |
275 | extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, |
276 | struct page *page, unsigned long offset, | |
277 | size_t size, unsigned long mask, | |
278 | enum dma_data_direction direction, | |
00085f1e | 279 | unsigned long attrs); |
f9226d57 MN |
280 | extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, |
281 | size_t size, enum dma_data_direction direction, | |
00085f1e | 282 | unsigned long attrs); |
9b6b563c PM |
283 | |
284 | extern void iommu_init_early_pSeries(void); | |
798248a3 | 285 | extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops); |
31c56d82 | 286 | extern void iommu_init_early_pasemi(void); |
9b6b563c | 287 | |
7e11580b JB |
288 | #if defined(CONFIG_PPC64) && defined(CONFIG_PM) |
289 | static inline void iommu_save(void) | |
290 | { | |
291 | if (ppc_md.iommu_save) | |
292 | ppc_md.iommu_save(); | |
293 | } | |
294 | ||
295 | static inline void iommu_restore(void) | |
296 | { | |
297 | if (ppc_md.iommu_restore) | |
298 | ppc_md.iommu_restore(); | |
299 | } | |
300 | #endif | |
9b6b563c | 301 | |
4e13c1ac | 302 | /* The API to support IOMMU operations for VFIO */ |
b1af23d8 AK |
303 | extern int iommu_tce_check_ioba(unsigned long page_shift, |
304 | unsigned long offset, unsigned long size, | |
305 | unsigned long ioba, unsigned long npages); | |
306 | extern int iommu_tce_check_gpa(unsigned long page_shift, | |
307 | unsigned long gpa); | |
308 | ||
309 | #define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \ | |
310 | (iommu_tce_check_ioba((tbl)->it_page_shift, \ | |
311 | (tbl)->it_offset, (tbl)->it_size, \ | |
312 | (ioba), (npages)) || (tce_value)) | |
313 | #define iommu_tce_put_param_check(tbl, ioba, gpa) \ | |
314 | (iommu_tce_check_ioba((tbl)->it_page_shift, \ | |
315 | (tbl)->it_offset, (tbl)->it_size, \ | |
316 | (ioba), 1) || \ | |
317 | iommu_tce_check_gpa((tbl)->it_page_shift, (gpa))) | |
4e13c1ac AK |
318 | |
319 | extern void iommu_flush_tce(struct iommu_table *tbl); | |
320 | extern int iommu_take_ownership(struct iommu_table *tbl); | |
321 | extern void iommu_release_ownership(struct iommu_table *tbl); | |
322 | ||
323 | extern enum dma_data_direction iommu_tce_direction(unsigned long tce); | |
10b35b2b | 324 | extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir); |
4e13c1ac | 325 | |
88ced031 | 326 | #endif /* __KERNEL__ */ |
9b6b563c | 327 | #endif /* _ASM_IOMMU_H */ |