vfio/spapr: Add a helper to create default DMA window
drivers/vfio/vfio_iommu_spapr_tce.c
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp. All rights reserved.
 * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"aik@ozlabs.ru"
#define DRIVER_DESC	"VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

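/*
 * Charge @npages against the RLIMIT_MEMLOCK limit of the current process;
 * fails with -ENOMEM if the limit would be exceeded and the task does not
 * have CAP_IPC_LOCK. decrement_locked_vm() undoes the accounting.
 */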
static long try_increment_locked_vm(long npages)
{
	long ret = 0, locked, lock_limit;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if (!npages)
		return 0;

	down_write(&current->mm->mmap_sem);
	locked = current->mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		current->mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}

static void decrement_locked_vm(long npages)
{
	if (!current || !current->mm || !npages)
		return; /* process exited */

	down_write(&current->mm->mmap_sem);
	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
		npages = current->mm->locked_vm;
	current->mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&current->mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * The container descriptor is not supplied with an IOMMU group at the
 * moment of initialization; groups are attached to it later. Multiple
 * groups may share a container as long as they are managed by compatible
 * iommu_table_group_ops.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	unsigned long locked_pages;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
};

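/*
 * Memory preregistration (SPAPR TCE v2 IOMMU): userspace registers chunks
 * of its address space with mm_iommu_get() so that the pages are pinned
 * and accounted once; later DMA mappings only translate within the
 * preregistered areas.
 */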
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(current->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	return mm_iommu_put(current->mm, mem);
}

static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	unsigned long entries = size >> PAGE_SHIFT;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	ret = mm_iommu_get(current->mm, vaddr, entries, &mem);
	if (ret)
		return ret;

	container->enabled = true;

	return 0;
}

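/*
 * The "userspace view" is a per-table array which caches, for every TCE
 * entry, the userspace address it was mapped from; the v2 code needs it
 * to find the preregistered region again on unmap. The array is
 * vmalloc'ed and charged against RLIMIT_MEMLOCK.
 */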
static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);
	unsigned long *uas;
	long ret;

	BUG_ON(tbl->it_userspace);

	ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
	if (ret)
		return ret;

	uas = vzalloc(cb);
	if (!uas) {
		decrement_locked_vm(cb >> PAGE_SHIFT);
		return -ENOMEM;
	}
	tbl->it_userspace = uas;

	return 0;
}

static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);

	if (!tbl->it_userspace)
		return;

	vfree(tbl->it_userspace);
	tbl->it_userspace = NULL;
	decrement_locked_vm(cb >> PAGE_SHIFT);
}

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}

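/*
 * Find the container table whose DMA window covers @ioba. Returns the
 * table index and stores the table in @ptbl, or returns -1 if no window
 * matches.
 */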
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (!current->mm)
		return -ESRCH; /* process exited */

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap. For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult to impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled. The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	if (!current->mm)
		return;

	decrement_locked_vm(container->locked_pages);
}

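/*
 * VFIO IOMMU driver callbacks start here. tce_iommu_open() creates a
 * container for either the v1 (VFIO_SPAPR_TCE_IOMMU) or the v2
 * (VFIO_SPAPR_TCE_v2_IOMMU) flavour of the SPAPR TCE interface.
 */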
static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(tbl);
	}

	tce_iommu_disable(container);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}

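/*
 * Translate a userspace address within a preregistered (v2) memory area
 * into a host physical address, also returning the mm_iommu descriptor so
 * the caller can adjust its "mapped" reference count.
 */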
static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(current->mm, tce, size);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
		unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua || !current || !current->mm)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
			&hpa, &mem);
	if (ret)
		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
				__func__, *pua, entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = 0;
}

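/*
 * Clear @pages TCE entries starting from @entry and release the pages
 * they pointed to: put_page() for the v1 flow, a "mapped" reference drop
 * on the preregistered region for the v2 flow.
 */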
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}

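/*
 * v1 map path: pin each userspace page with get_user_pages_fast() and
 * program the translation into the TCE table. On any failure the entries
 * installed so far are torn down via tce_iommu_clear().
 */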
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

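/*
 * v2 map path: the pages are already pinned via memory preregistration,
 * so only translate the userspace address, take a "mapped" reference on
 * the preregistered region and remember the userspace address in the
 * table's userspace view for the later unmap.
 */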
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	if (!tbl->it_userspace) {
		ret = tce_iommu_userspace_view_alloc(tbl);
		if (ret)
			return ret;
	}

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
				entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
				&hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(tbl, entry + i);

		*pua = tce;

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

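/*
 * Allocate a hardware TCE table via the platform callbacks; the table
 * memory is charged against RLIMIT_MEMLOCK first. tce_iommu_free_table()
 * is the counterpart and also releases the userspace view.
 */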
static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	return ret;
}

static void tce_iommu_free_table(struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tce_iommu_userspace_view_free(tbl);
	tbl->it_ops->free(tbl);
	decrement_locked_vm(pages);
}

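/*
 * Create a DMA window: pick a free table slot, allocate the table using
 * the first attached group's callbacks and program the window into every
 * attached group. The window start address is chosen by the platform and
 * returned to the caller.
 */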
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(tbl);

	return ret;
}

static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(tbl);
	container->tables[num] = NULL;

	return 0;
}

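/*
 * Create the default 32-bit DMA window (4K IOMMU pages, tce32_size bytes,
 * starting at IOVA 0) which userspace expects to exist after the first
 * group is attached; groups without dynamic DMA window support keep the
 * platform-created tables instead.
 */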
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	return ret;
}

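/*
 * The ioctl interface. A minimal, purely illustrative v1 userspace flow
 * (error handling omitted, names such as container_fd/buf hypothetical),
 * assuming the group is already bound to the container via
 * VFIO_GROUP_SET_CONTAINER, looks roughly like:
 *
 *	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *	struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map) };
 *
 *	ioctl(container_fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(container_fd, VFIO_IOMMU_ENABLE);
 *	ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *	map.vaddr = (__u64)buf;			// page-aligned user buffer
 *	map.iova = info.dma32_window_start;
 *	map.size = buf_size;			// multiple of the IOMMU page size
 *	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
 *	ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 *
 * The v2 flavour additionally requires VFIO_IOMMU_SPAPR_REGISTER_MEMORY
 * before mapping and supports VFIO_IOMMU_SPAPR_TCE_CREATE/REMOVE for
 * dynamic DMA windows.
 */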
static long tce_iommu_ioctl(void *iommu_data,
		unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;

	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;


	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}

	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_window(container, create.page_shift,
				create.window_size, create.levels,
				&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}

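/*
 * Ownership handover for groups without dynamic DMA window support:
 * VFIO takes exclusive control of the platform-created tables (and gives
 * it back on release), clearing any mappings left behind.
 */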
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_userspace_view_free(tbl);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}

static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	return 0;
}

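/*
 * Attach a group to the container: reject groups whose table_group ops
 * are incompatible with already-attached ones, take ownership of the
 * hardware tables and, for the first DDW-capable group, create the
 * default DMA window that userspace expects.
 */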
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;
	bool create_default_window = false;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			create_default_window = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
		/*
		 * If this is the first group attached, check if there is
		 * a default DMA window and create one if there is none,
		 * as userspace expects it to exist.
		 */
		if (create_default_window) {
			ret = tce_iommu_create_default_window(container);
			if (ret) {
				list_del(&tcegrp->next);
				tce_iommu_release_ownership_ddw(container,
						table_group);
			}
		}
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);