Commit | Line | Data |
---|---|---|
9b6b563c PM |
1 | /* |
2 | * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation | |
3 | * Rewrite, cleanup: | |
91f14480 | 4 | * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation |
2db4928b | 5 | * |
9b6b563c PM |
6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License, or | |
9 | * (at your option) any later version. | |
2db4928b | 10 | * |
9b6b563c PM |
11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
2db4928b | 15 | * |
9b6b563c PM |
16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
19 | */ | |
20 | ||
21 | #ifndef _ASM_IOMMU_H | |
22 | #define _ASM_IOMMU_H | |
88ced031 | 23 | #ifdef __KERNEL__ |
9b6b563c | 24 | |
5d2efba6 | 25 | #include <linux/compiler.h> |
9b6b563c PM |
26 | #include <linux/spinlock.h> |
27 | #include <linux/device.h> | |
28 | #include <linux/dma-mapping.h> | |
1977f032 | 29 | #include <linux/bitops.h> |
7e11580b | 30 | #include <asm/machdep.h> |
5d2efba6 | 31 | #include <asm/types.h> |
798248a3 | 32 | #include <asm/pci-bridge.h> |
ec0c464c | 33 | #include <asm/asm-const.h> |
5d2efba6 | 34 | |
/*
 * Fixed 4K IOMMU page geometry, for tables whose hardware/hypervisor
 * interface only supports 4K TCE pages.
 */
#define IOMMU_PAGE_SHIFT_4K      12
#define IOMMU_PAGE_SIZE_4K       (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K       (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)

/*
 * Per-table page geometry: each iommu_table carries its own page shift
 * (it_page_shift), so IOMMU page size can differ between tables.
 */
#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))

/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;
5d2efba6 | 47 | |
/*
 * Platform-specific callbacks for manipulating TCEs (translation control
 * entries) in an iommu_table. Provided by pseries/powernv/pasemi etc.
 */
struct iommu_table_ops {
	/*
	 * When called with direction==DMA_NONE, it is equal to clear().
	 * uaddr is a linear map address.
	 */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			unsigned long attrs);
#ifdef CONFIG_IOMMU_API
	/*
	 * Exchanges existing TCE with new TCE plus direction bits;
	 * returns old TCE and DMA direction mask.
	 * @tce is a physical address.
	 */
	int (*exchange)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction);
	/* Real mode */
	int (*exchange_rm)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction);

	/*
	 * Returns a pointer to the userspace view of a TCE entry;
	 * may allocate backing storage when @alloc is true.
	 */
	__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* get() returns a physical address */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	void (*flush)(struct iommu_table *tbl);
	void (*free)(struct iommu_table *tbl);
};

/* These are used by VIO */
extern struct iommu_table_ops iommu_table_lpar_multi_ops;
extern struct iommu_table_ops iommu_table_pseries_ops;
87 | ||
/*
 * IOMAP_MAX_ORDER defines the largest contiguous block
 * of dma space we can get.  IOMAP_MAX_ORDER = 13
 * allows up to 2**12 pages (4096 * 4096) = 16 MB
 */
#define IOMAP_MAX_ORDER		13

#define IOMMU_POOL_HASHBITS	2
#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)

/*
 * One sub-range of an iommu_table's allocation bitmap. The table is split
 * into several pools, each with its own lock, to reduce contention when
 * multiple CPUs allocate DMA space concurrently.
 */
struct iommu_pool {
	unsigned long start;	/* first entry belonging to this pool */
	unsigned long end;	/* one past the last entry */
	unsigned long hint;	/* next-fit search start hint */
	spinlock_t lock;
} ____cacheline_aligned_in_smp;
/*
 * One TCE table: its geometry, the bitmap allocator over its entries
 * (split into pools), and the platform ops used to program it.
 */
struct iommu_table {
	unsigned long  it_busno;	/* Bus number this table belongs to */
	unsigned long  it_size;		/* Size of iommu table in entries */
	unsigned long  it_indirect_levels;	/* levels of indirect TCE tables */
	unsigned long  it_level_size;	/* entries per indirect level */
	unsigned long  it_allocated_size;	/* bytes used by table backing store */
	unsigned long  it_offset;	/* Offset into global table */
	unsigned long  it_base;		/* mapped address of tce table */
	unsigned long  it_index;	/* which iommu table this is */
	unsigned long  it_type;		/* type: PCI or Virtual Bus */
	unsigned long  it_blocksize;	/* Entries in each block (cacheline) */
	unsigned long  poolsize;	/* entries per pool */
	unsigned long  nr_pools;	/* number of pools actually in use */
	struct iommu_pool large_pool;	/* pool reserved for large allocations */
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;		/* A simple allocation bitmap for now */
	unsigned long  it_page_shift;	/* table iommu page size */
	struct list_head it_group_list;	/* List of iommu_table_group_link */
	__be64 *it_userspace;		/* userspace view of the table */
	struct iommu_table_ops *it_ops;
	struct kref    it_kref;		/* see iommu_tce_table_get/put() */
	int it_nid;			/* NUMA node of the table's memory */
};

/* Userspace view of a TCE entry; _RO never allocates backing storage */
#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), true))
d0847757 AP |
134 | /* Pure 2^n version of get_order */ |
135 | static inline __attribute_const__ | |
136 | int get_iommu_order(unsigned long size, struct iommu_table *tbl) | |
137 | { | |
138 | return __ilog2((size - 1) >> tbl->it_page_shift) + 1; | |
139 | } | |
140 | ||
141 | ||
struct scatterlist;

#ifdef CONFIG_PPC64

/* Record which iommu_table a device's DMA mappings are made from */
static inline void set_iommu_table_base(struct device *dev,
					struct iommu_table *base)
{
	dev->archdata.iommu_table_base = base;
}

static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.iommu_table_base;
}

extern int dma_iommu_dma_supported(struct device *dev, u64 mask);

/* Reference counting for iommu_table; put() releases the table at refcount 0 */
extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
extern int iommu_tce_table_put(struct iommu_table *tbl);

/* Initializes an iommu_table based on values set in the passed-in
 * structure
 */
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
					    int nid);
/* Maximum number of DMA windows (tables) per table group */
#define IOMMU_TABLE_GROUP_MAX_TABLES	2
b348aa65 | 168 | |
struct iommu_table_group;

/*
 * Callbacks used by an external user (e.g. the VFIO SPAPR TCE driver)
 * to create/attach/detach DMA windows on a table group and to move
 * ownership of the group between the platform and the user.
 */
struct iommu_table_group_ops {
	unsigned long (*get_table_size)(
			__u32 page_shift,
			__u64 window_size,
			__u32 levels);
	long (*create_table)(struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl);
	long (*set_window)(struct iommu_table_group *table_group,
			int num,
			struct iommu_table *tblnew);
	long (*unset_window)(struct iommu_table_group *table_group,
			int num);
	/* Switch ownership from platform code to external user (e.g. VFIO) */
	void (*take_ownership)(struct iommu_table_group *table_group);
	/* Switch ownership from external user (e.g. VFIO) back to core */
	void (*release_ownership)(struct iommu_table_group *table_group);
};
192 | ||
/*
 * Links an iommu_table to one of the groups sharing it; lives on the
 * table's it_group_list and is freed via RCU.
 */
struct iommu_table_group_link {
	struct list_head next;
	struct rcu_head rcu;
	struct iommu_table_group *table_group;
};
198 | ||
/* A set of DMA windows sharing one iommu_group (typically one per PE) */
struct iommu_table_group {
	/* IOMMU properties */
	__u32 tce32_start;	/* start of the default 32-bit window */
	__u32 tce32_size;	/* size of the default 32-bit window */
	__u64 pgsizes;		/* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 max_levels;	/* max indirect TCE levels supported */

	struct iommu_group *group;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct iommu_table_group_ops *ops;
};
211 | ||
#ifdef CONFIG_IOMMU_API

extern void iommu_register_group(struct iommu_table_group *table_group,
				 int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev);
extern void iommu_del_device(struct device *dev);
extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
#else
/* No-op stubs so callers build unchanged without the generic IOMMU API */
static inline void iommu_register_group(struct iommu_table_group *table_group,
					int pci_domain_number,
					unsigned long pe_num)
{
}

static inline int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev)
{
	return 0;
}

static inline void iommu_del_device(struct device *dev)
{
}
#endif /* !CONFIG_IOMMU_API */
239 | ||
#else

/* !CONFIG_PPC64: no iommu tables; report no IOMMU DMA support */
static inline void *get_iommu_table_base(struct device *dev)
{
	return NULL;
}

static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return 0;
}

#endif /* CONFIG_PPC64 */
253 | ||
/* Map/unmap a scatterlist through the given TCE table */
extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
			    struct scatterlist *sglist, int nelems,
			    unsigned long mask,
			    enum dma_data_direction direction,
			    unsigned long attrs);
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
			       struct scatterlist *sglist,
			       int nelems,
			       enum dma_data_direction direction,
			       unsigned long attrs);

/* Allocate/free a coherent buffer and its TCE mapping in one call */
extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
				  size_t size, dma_addr_t *dma_handle,
				  unsigned long mask, gfp_t flag, int node);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
				void *vaddr, dma_addr_t dma_handle);
/* Map/unmap a single page for streaming DMA */
extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
				 struct page *page, unsigned long offset,
				 size_t size, unsigned long mask,
				 enum dma_data_direction direction,
				 unsigned long attrs);
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction,
			     unsigned long attrs);

/* Platform early-initialisation entry points */
extern void iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);
#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
/* Suspend/resume hooks; forwarded to the platform when it provides them */
static inline void iommu_save(void)
{
	if (ppc_md.iommu_save)
		ppc_md.iommu_save();
}

static inline void iommu_restore(void)
{
	if (ppc_md.iommu_restore)
		ppc_md.iommu_restore();
}
#endif
9b6b563c | 296 | |
/* The API to support IOMMU operations for VFIO */
/* Both return 0 on success, non-zero if the request is out of range */
extern int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages);
extern int iommu_tce_check_gpa(unsigned long page_shift,
		unsigned long gpa);

/* Validate a clear request: ioba/npages in range and tce_value must be 0 */
#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), (npages)) ||              \
				(tce_value))
/* Validate a put request: ioba in range and gpa aligned to the page size */
#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), 1) ||                     \
		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))

extern void iommu_flush_tce(struct iommu_table *tbl);
extern int iommu_take_ownership(struct iommu_table *tbl);
extern void iommu_release_ownership(struct iommu_table *tbl);

extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);
88ced031 | 321 | #endif /* __KERNEL__ */ |
9b6b563c | 322 | #endif /* _ASM_IOMMU_H */ |