// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DBG(...)
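
/*
 * Optional debugfs support: each iommu_table gets a directory named
 * after its LIOBN (it_index) exposing the bitmap weight (number of
 * allocated TCEs) and the table geometry.
 */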
#ifdef CONFIG_IOMMU_DEBUGFS
static int iommu_debugfs_weight_get(void *data, u64 *val)
{
	struct iommu_table *tbl = data;
	*val = bitmap_weight(tbl->it_map, tbl->it_size);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");

static void iommu_debugfs_add(struct iommu_table *tbl)
{
	char name[17];	/* room for 16 hex digits of it_index plus NUL */
	struct dentry *liobn_entry;

	sprintf(name, "%08lx", tbl->it_index);
	liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);

	debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
	debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
	debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
	debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
	debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
	debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
	debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
}

static void iommu_debugfs_del(struct iommu_table *tbl)
{
	char name[17];
	struct dentry *liobn_entry;

	sprintf(name, "%08lx", tbl->it_index);
	liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
	debugfs_remove(liobn_entry);
}
#else
static void iommu_debugfs_add(struct iommu_table *tbl) { }
static void iommu_debugfs_del(struct iommu_table *tbl) { }
#endif

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
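
/*
 * "Virtual merging" coalesces physically discontiguous scatterlist
 * entries whose allocated DMA addresses happen to be contiguous into a
 * single DMA segment. It can be toggled from the kernel command line:
 *
 *	iommu=novmerge		disable virtual merging
 *	iommu=vmerge		enable virtual merging (the default)
 */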
static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For
 * example, on a POWER7 with 4-way SMT we want interrupts on the primary
 * threads; without the hash, with 4 pools, every primary thread would
 * map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);
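
/*
 * Optional fault injection: with CONFIG_FAIL_IOMMU, IOMMU allocations
 * can be made to fail artificially, per device, through the common
 * fault-injection framework (see Documentation/fault-injection/) and a
 * per-device "fail_iommu" sysfs attribute.
 */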
#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR_RW(fail_iommu);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but
 * before devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif
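
/*
 * Allocate a contiguous run of "npages" entries from the table's
 * bitmap. The bitmap is split into pools (plus a large pool reserved
 * at the top for allocations of more than 15 pages) so that concurrent
 * allocations from different CPUs mostly take different locks. "mask"
 * bounds the highest acceptable DMA address, "handle" carries the scan
 * hint between calls when mapping a scatterlist, and "align_order"
 * requests a power-of-two alignment. Returns the first allocated entry
 * (relative to the start of the bitmap), or DMA_MAPPING_ERROR.
 */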
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_MAPPING_ERROR;
	}

	if (should_fail_iommu(dev))
		return DMA_MAPPING_ERROR;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
			align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass == tbl->nr_pools + 1) {
			/* Last resort: try the large pool */
			spin_unlock(&pool->lock);
			pool = &tbl->large_pool;
			spin_lock(&pool->lock);
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_MAPPING_ERROR;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
			~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
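
/*
 * Allocate a free range of TCEs, program the hardware table to map
 * "npages" IOMMU pages covering the buffer at "page", and return the
 * resulting DMA address (or DMA_MAPPING_ERROR if allocation or the
 * hardware update fails).
 */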
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_MAPPING_ERROR))
		return DMA_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
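
/*
 * Validate that "dma_addr".."dma_addr + npages" lies within the table
 * before freeing; on a bogus free, complain (rate-limited) and return
 * false rather than corrupting the bitmap.
 */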
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
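
/*
 * Map a scatterlist: each segment gets its own TCE range, and segments
 * whose DMA addresses turn out contiguous are merged into one segment
 * ("virtual merging", unless booted with iommu=novmerge) as long as
 * the merged length stays under the device's max segment size.
 * Returns the number of mapped DMA segments or a negative errno; on
 * failure, all partially-built mappings are torn down.
 */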
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return -EINVAL;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_MAPPING_ERROR)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return -EIO;
}
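
/*
 * Undo ppc_iommu_map_sg(): walk the list up to the first zero-length
 * entry (the terminator map_sg left behind) and release each segment's
 * TCEs.
 */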
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
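
/*
 * Initialise the bitmap to match whatever the firmware (or, when
 * kdumping, the first kernel) left in the TCE table.
 */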
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump, the system goes through a
	 * clean reboot process at the time of system crash. Hence it's
	 * safe to clear the TCE entries if firmware assisted dump is
	 * active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing %d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}
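
/*
 * Mark [res_start, res_end) (e.g. a reserved 32-bit MMIO window) as
 * permanently allocated in the bitmap, after clamping the range to the
 * table; page 0 is also reserved to catch buggy drivers that treat DMA
 * address 0 as invalid.
 */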
static void iommu_table_reserve_pages(struct iommu_table *tbl,
		unsigned long res_start, unsigned long res_end)
{
	int i;

	WARN_ON_ONCE(res_end < res_start);
	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This keeps buggy drivers that consider page 0 to be invalid
	 * from crashing the machine or even losing data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	if (res_start < tbl->it_offset)
		res_start = tbl->it_offset;

	if (res_end > (tbl->it_offset + tbl->it_size))
		res_end = tbl->it_offset + tbl->it_size;

	/* Check if res_start..res_end is a valid range in the table */
	if (res_start >= res_end) {
		tbl->it_reserved_start = tbl->it_offset;
		tbl->it_reserved_end = tbl->it_offset;
		return;
	}

	tbl->it_reserved_start = res_start;
	tbl->it_reserved_end = res_end;

	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		set_bit(i - tbl->it_offset, tbl->it_map);
}

/*
 * Build an iommu_table structure. This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
		unsigned long res_start, unsigned long res_end)
{
	unsigned long sz;
	static int welcomed = 0;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	tbl->it_map = vzalloc_node(sz, nid);
	if (!tbl->it_map) {
		pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
		return NULL;
	}

	iommu_table_reserve_pages(tbl, res_start, res_end);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	iommu_debugfs_add(tbl);

	return tbl;
}
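
/*
 * Report whether the table currently has any live mappings, ignoring
 * the implicit page-0 reservation and the reserved MMIO32 region.
 */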
bool iommu_table_in_use(struct iommu_table *tbl)
{
	unsigned long start = 0, end;

	/* ignore reserved bit0 */
	if (tbl->it_offset == 0)
		start = 1;

	/* Simple case with no reserved MMIO32 region */
	if (!tbl->it_reserved_start && !tbl->it_reserved_end)
		return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;

	end = tbl->it_reserved_start - tbl->it_offset;
	if (find_next_bit(tbl->it_map, end, start) != end)
		return true;

	start = tbl->it_reserved_end - tbl->it_offset;
	end = tbl->it_size;
	return find_next_bit(tbl->it_map, end, start) != end;
}
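
/*
 * kref release callback: sanity-check that the table is empty, then
 * free the platform-level state, the bitmap and the table itself.
 */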
static void iommu_table_free(struct kref *kref)
{
	struct iommu_table *tbl;

	tbl = container_of(kref, struct iommu_table, it_kref);

	if (tbl->it_ops->free)
		tbl->it_ops->free(tbl);

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	iommu_debugfs_del(tbl);

	/* verify that table contains no entries */
	if (iommu_table_in_use(tbl))
		pr_warn("%s: Unexpected TCEs\n", __func__);

	/* free bitmap */
	vfree(tbl->it_map);

	/* free table */
	kfree(tbl);
}

struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	if (kref_get_unless_zero(&tbl->it_kref))
		return tbl;

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
	if (WARN_ON(!tbl))
		return 0;

	return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);
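
/*
 * A minimal usage sketch for the two helpers below (hypothetical
 * caller; "buf" and "len" are made-up names for illustration):
 *
 *	dma_addr_t dma = iommu_map_page(dev, tbl, virt_to_page(buf),
 *					offset_in_page(buf), len,
 *					dma_get_mask(dev), DMA_TO_DEVICE, 0);
 *	if (dma == DMA_MAPPING_ERROR)
 *		return -ENOMEM;
 *	...
 *	iommu_unmap_page(tbl, dma, len, DMA_TO_DEVICE, 0);
 */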

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;

	if (tbl) {
		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_MAPPING_ERROR) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      unsigned long attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up TCEs to cover the allocated range */
	nio_pages = size >> tbl->it_page_shift;
	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, 0);
	if (mapping == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
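
/*
 * Translate a DMA data direction into TCE read/write permission bits.
 */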
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
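
/*
 * Validate a guest-supplied I/O bus address against the table window:
 * "ioba" must be aligned to the IOMMU page size and fall within the
 * "offset".."offset + size" range of the table.
 */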
int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	if ((ioba + 1) > (offset + size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);

int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (gpa & mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
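
/*
 * Exchange a TCE with a new value without issuing the TCE-kill
 * (invalidation); callers batch invalidations via iommu_tce_kill().
 * If the old entry mapped regular memory that the device may have
 * written to, mark the page dirty.
 */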
long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;
	unsigned long size = 0;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)) &&
			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
					&size))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);

void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages);
}
EXPORT_SYMBOL_GPL(iommu_tce_kill);
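
/*
 * Take exclusive control of the table on behalf of VFIO: lock every
 * pool, refuse if any TCEs are already in use, and otherwise mark the
 * whole bitmap allocated so the kernel DMA API keeps its hands off.
 * iommu_release_ownership() below reverses this.
 */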
int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation, and the guest
	 * can write new TCEs on top of existing ones, so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires the exchange() callback to be defined, so if it is
	 * not implemented we disallow taking ownership of the table.
	 */
	if (!tbl->it_ops->xchg_no_kill)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

	if (iommu_table_in_use(tbl)) {
		pr_err("iommu_tce: it_map is not empty");
		ret = -EBUSY;
	} else {
		memset(tbl->it_map, 0xff, sz);
	}

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

	memset(tbl->it_map, 0, sz);

	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
			tbl->it_reserved_end);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
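
/*
 * Attach a device to the IOMMU group of its PE so that VFIO can claim
 * it. Expects the device to be registered and not already grouped.
 */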
int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
	/*
	 * The sysfs entries should be populated before binding the
	 * IOMMU group. If the sysfs entries aren't ready, we simply
	 * bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (device_iommu_mapped(dev)) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev), iommu_group_id(table_group->group));

	return iommu_group_add_device(table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
	/*
	 * Some devices might not have an IOMMU table and group, so we
	 * need not detach them from the associated IOMMU groups.
	 */
	if (!device_iommu_mapped(dev)) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
			 dev_name(dev));
		return;
	}

	iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);
#endif /* CONFIG_IOMMU_API */