// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 * The io_pagetable is the top of the data structure that maps IOVAs to PFNs.
 * The PFNs can be placed into an iommu_domain, or returned to the caller as a
 * page list for access by an in-kernel user.
 *
 * The data structure uses the iopt_pages to optimize the storage of the PFNs
 * between the domains and xarray.
 */
#include <linux/iommufd.h>
#include <linux/lockdep.h>
#include <linux/iommu.h>
#include <linux/sched/mm.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <uapi/linux/iommufd.h>

#include "io_pagetable.h"
#include "double_span.h"

struct iopt_pages_list {
	struct iopt_pages *pages;
	struct iopt_area *area;
	struct list_head next;
	unsigned long start_byte;
	unsigned long length;
};

struct iopt_area *iopt_area_contig_init(struct iopt_area_contig_iter *iter,
					struct io_pagetable *iopt,
					unsigned long iova,
					unsigned long last_iova)
{
	lockdep_assert_held(&iopt->iova_rwsem);

	iter->cur_iova = iova;
	iter->last_iova = last_iova;
	iter->area = iopt_area_iter_first(iopt, iova, iova);
	if (!iter->area)
		return NULL;
	if (!iter->area->pages) {
		iter->area = NULL;
		return NULL;
	}
	return iter->area;
}

struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter)
{
	unsigned long last_iova;

	if (!iter->area)
		return NULL;
	last_iova = iopt_area_last_iova(iter->area);
	if (iter->last_iova <= last_iova)
		return NULL;

	iter->cur_iova = last_iova + 1;
	iter->area = iopt_area_iter_next(iter->area, iter->cur_iova,
					 iter->last_iova);
	if (!iter->area)
		return NULL;
	if (iter->cur_iova != iopt_area_iova(iter->area) ||
	    !iter->area->pages) {
		iter->area = NULL;
		return NULL;
	}
	return iter->area;
}

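/*
 * The two functions above back the iopt_for_each_contig_area() helper in
 * io_pagetable.h. A typical walk over [iova, last_iova] is (sketch):
 *
 *	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
 *		unsigned long last = min(last_iova, iopt_area_last_iova(area));
 *
 *		// act on [iter.cur_iova, last]
 *	}
 *	if (!iopt_area_contig_done(&iter))
 *		// a gap or an unpopulated area ended the walk early
 */
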
static bool __alloc_iova_check_hole(struct interval_tree_double_span_iter *span,
				    unsigned long length,
				    unsigned long iova_alignment,
				    unsigned long page_offset)
{
	if (span->is_used || span->last_hole - span->start_hole < length - 1)
		return false;

	span->start_hole = ALIGN(span->start_hole, iova_alignment) |
			   page_offset;
	if (span->start_hole > span->last_hole ||
	    span->last_hole - span->start_hole < length - 1)
		return false;
	return true;
}

static bool __alloc_iova_check_used(struct interval_tree_span_iter *span,
				    unsigned long length,
				    unsigned long iova_alignment,
				    unsigned long page_offset)
{
	if (span->is_hole || span->last_used - span->start_used < length - 1)
		return false;

	span->start_used = ALIGN(span->start_used, iova_alignment) |
			   page_offset;
	if (span->start_used > span->last_used ||
	    span->last_used - span->start_used < length - 1)
		return false;
	return true;
}

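/*
 * Both helpers above compare with "last - start < length - 1" rather than
 * "last - start + 1 < length" so a span covering the entire unsigned long
 * space cannot overflow. The ALIGN() | page_offset step keeps the candidate
 * IOVA congruent to the user pointer modulo PAGE_SIZE; e.g. (illustrative)
 * iova_alignment 0x1000 and page_offset 0x234 turn a hole starting at
 * 0x10500 into the candidate 0x11234.
 */
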
/*
 * Automatically find a block of IOVA that is not being used and not reserved.
 * Does not return a 0 IOVA even if it is valid.
 */
static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
			   unsigned long uptr, unsigned long length)
{
	unsigned long page_offset = uptr % PAGE_SIZE;
	struct interval_tree_double_span_iter used_span;
	struct interval_tree_span_iter allowed_span;
	unsigned long iova_alignment;

	lockdep_assert_held(&iopt->iova_rwsem);

	/* Protect roundup_pow_of_two() from overflow */
	if (length == 0 || length >= ULONG_MAX / 2)
		return -EOVERFLOW;

	/*
	 * Keep alignment present in the uptr when building the IOVA, this
	 * increases the chance we can map a THP.
	 */
	if (!uptr)
		iova_alignment = roundup_pow_of_two(length);
	else
		iova_alignment = min_t(unsigned long,
				       roundup_pow_of_two(length),
				       1UL << __ffs64(uptr));

	if (iova_alignment < iopt->iova_alignment)
		return -EINVAL;

	interval_tree_for_each_span(&allowed_span, &iopt->allowed_itree,
				    PAGE_SIZE, ULONG_MAX - PAGE_SIZE) {
		if (RB_EMPTY_ROOT(&iopt->allowed_itree.rb_root)) {
			allowed_span.start_used = PAGE_SIZE;
			allowed_span.last_used = ULONG_MAX - PAGE_SIZE;
			allowed_span.is_hole = false;
		}

		if (!__alloc_iova_check_used(&allowed_span, length,
					     iova_alignment, page_offset))
			continue;

		interval_tree_for_each_double_span(
			&used_span, &iopt->reserved_itree, &iopt->area_itree,
			allowed_span.start_used, allowed_span.last_used) {
			if (!__alloc_iova_check_hole(&used_span, length,
						     iova_alignment,
						     page_offset))
				continue;

			*iova = used_span.start_hole;
			return 0;
		}
	}
	return -ENOSPC;
}

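/*
 * Alignment selection example (illustrative): for uptr = 0x7f1234566000 and
 * length = 0x200000, roundup_pow_of_two(length) is 0x200000 and
 * 1UL << __ffs64(uptr) is 0x2000, so the IOVA is chosen at 0x2000 alignment.
 * The low bits of the IOVA then match the low bits of uptr, which lets the
 * domain mapping use larger IOMMU page sizes when they are available.
 */
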
static int iopt_check_iova(struct io_pagetable *iopt, unsigned long iova,
			   unsigned long length)
{
	unsigned long last;

	lockdep_assert_held(&iopt->iova_rwsem);

	if ((iova & (iopt->iova_alignment - 1)))
		return -EINVAL;

	if (check_add_overflow(iova, length - 1, &last))
		return -EOVERFLOW;

	/* No reserved IOVA intersects the range */
	if (iopt_reserved_iter_first(iopt, iova, last))
		return -EINVAL;

	/* Check that there is not already a mapping in the range */
	if (iopt_area_iter_first(iopt, iova, last))
		return -EEXIST;
	return 0;
}

/*
 * The area takes a slice of the pages from start_byte to start_byte + length
 */
static int iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area,
			    struct iopt_pages *pages, unsigned long iova,
			    unsigned long start_byte, unsigned long length,
			    int iommu_prot)
{
	lockdep_assert_held_write(&iopt->iova_rwsem);

	if ((iommu_prot & IOMMU_WRITE) && !pages->writable)
		return -EPERM;

	area->iommu_prot = iommu_prot;
	area->page_offset = start_byte % PAGE_SIZE;
	if (area->page_offset & (iopt->iova_alignment - 1))
		return -EINVAL;

	area->node.start = iova;
	if (check_add_overflow(iova, length - 1, &area->node.last))
		return -EOVERFLOW;

	area->pages_node.start = start_byte / PAGE_SIZE;
	if (check_add_overflow(start_byte, length - 1, &area->pages_node.last))
		return -EOVERFLOW;
	area->pages_node.last = area->pages_node.last / PAGE_SIZE;
	if (WARN_ON(area->pages_node.last >= pages->npages))
		return -EOVERFLOW;

	/*
	 * The area is inserted with a NULL pages indicating it is not fully
	 * initialized yet.
	 */
	area->iopt = iopt;
	interval_tree_insert(&area->node, &iopt->area_itree);
	return 0;
}

static struct iopt_area *iopt_area_alloc(void)
{
	struct iopt_area *area;

	area = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
	if (!area)
		return NULL;
	RB_CLEAR_NODE(&area->node.rb);
	RB_CLEAR_NODE(&area->pages_node.rb);
	return area;
}

static int iopt_alloc_area_pages(struct io_pagetable *iopt,
				 struct list_head *pages_list,
				 unsigned long length, unsigned long *dst_iova,
				 int iommu_prot, unsigned int flags)
{
	struct iopt_pages_list *elm;
	unsigned long iova;
	int rc = 0;

	list_for_each_entry(elm, pages_list, next) {
		elm->area = iopt_area_alloc();
		if (!elm->area)
			return -ENOMEM;
	}

	down_write(&iopt->iova_rwsem);
	if ((length & (iopt->iova_alignment - 1)) || !length) {
		rc = -EINVAL;
		goto out_unlock;
	}

	if (flags & IOPT_ALLOC_IOVA) {
		/* Use the first entry to guess the ideal IOVA alignment */
		elm = list_first_entry(pages_list, struct iopt_pages_list,
				       next);
		rc = iopt_alloc_iova(
			iopt, dst_iova,
			(uintptr_t)elm->pages->uptr + elm->start_byte, length);
		if (rc)
			goto out_unlock;
		if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
		    WARN_ON(iopt_check_iova(iopt, *dst_iova, length))) {
			rc = -EINVAL;
			goto out_unlock;
		}
	} else {
		rc = iopt_check_iova(iopt, *dst_iova, length);
		if (rc)
			goto out_unlock;
	}

	/*
	 * Areas are created with a NULL pages so that the IOVA space is
	 * reserved and we can unlock the iova_rwsem.
	 */
	iova = *dst_iova;
	list_for_each_entry(elm, pages_list, next) {
		rc = iopt_insert_area(iopt, elm->area, elm->pages, iova,
				      elm->start_byte, elm->length, iommu_prot);
		if (rc)
			goto out_unlock;
		iova += elm->length;
	}

out_unlock:
	up_write(&iopt->iova_rwsem);
	return rc;
}

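/*
 * The list elements are laid out back to back in IOVA: with two entries of
 * lengths L0 and L1, the first covers [*dst_iova, *dst_iova + L0) and the
 * second [*dst_iova + L0, *dst_iova + L0 + L1). Each area is still only a
 * NULL-pages placeholder here; iopt_map_pages() completes it.
 */
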
static void iopt_abort_area(struct iopt_area *area)
{
	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
		WARN_ON(area->pages);
	if (area->iopt) {
		down_write(&area->iopt->iova_rwsem);
		interval_tree_remove(&area->node, &area->iopt->area_itree);
		up_write(&area->iopt->iova_rwsem);
	}
	kfree(area);
}

void iopt_free_pages_list(struct list_head *pages_list)
{
	struct iopt_pages_list *elm;

	while ((elm = list_first_entry_or_null(pages_list,
					       struct iopt_pages_list, next))) {
		if (elm->area)
			iopt_abort_area(elm->area);
		if (elm->pages)
			iopt_put_pages(elm->pages);
		list_del(&elm->next);
		kfree(elm);
	}
}

static int iopt_fill_domains_pages(struct list_head *pages_list)
{
	struct iopt_pages_list *undo_elm;
	struct iopt_pages_list *elm;
	int rc;

	list_for_each_entry(elm, pages_list, next) {
		rc = iopt_area_fill_domains(elm->area, elm->pages);
		if (rc)
			goto err_undo;
	}
	return 0;

err_undo:
	list_for_each_entry(undo_elm, pages_list, next) {
		if (undo_elm == elm)
			break;
		iopt_area_unfill_domains(undo_elm->area, undo_elm->pages);
	}
	return rc;
}

int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list,
		   unsigned long length, unsigned long *dst_iova,
		   int iommu_prot, unsigned int flags)
{
	struct iopt_pages_list *elm;
	int rc;

	rc = iopt_alloc_area_pages(iopt, pages_list, length, dst_iova,
				   iommu_prot, flags);
	if (rc)
		return rc;

	down_read(&iopt->domains_rwsem);
	rc = iopt_fill_domains_pages(pages_list);
	if (rc)
		goto out_unlock_domains;

	down_write(&iopt->iova_rwsem);
	list_for_each_entry(elm, pages_list, next) {
		/*
		 * area->pages must be set inside the domains_rwsem to ensure
		 * any newly added domains will get filled. Moves the reference
		 * in from the list.
		 */
		elm->area->pages = elm->pages;
		elm->pages = NULL;
		elm->area = NULL;
	}
	up_write(&iopt->iova_rwsem);
out_unlock_domains:
	up_read(&iopt->domains_rwsem);
	return rc;
}

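/*
 * Lock ordering note: domains_rwsem is always acquired before iova_rwsem in
 * this file. Publishing area->pages while holding domains_rwsem in read mode
 * means a concurrent iopt_table_add_domain() (which takes it in write mode)
 * sees either a NULL placeholder or a fully filled area, never a partially
 * filled one.
 */
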
/**
 * iopt_map_user_pages() - Map a user VA to an iova in the io page table
 * @ictx: iommufd_ctx the iopt is part of
 * @iopt: io_pagetable to act on
 * @iova: If IOPT_ALLOC_IOVA is set this is unused on input and contains
 *        the chosen iova on output. Otherwise is the iova to map to on input
 * @uptr: User VA to map
 * @length: Number of bytes to map
 * @iommu_prot: Combination of IOMMU_READ/WRITE/etc bits for the mapping
 * @flags: IOPT_ALLOC_IOVA or zero
 *
 * iova, uptr, and length must be aligned to iova_alignment. For domain backed
 * page tables this will pin the pages and load them into the domain at iova.
 * For non-domain page tables this will only setup a lazy reference and the
 * caller must use iopt_access_pages() to touch them.
 *
 * iopt_unmap_iova() must be called to undo this before the io_pagetable can be
 * destroyed.
 */
int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
			unsigned long *iova, void __user *uptr,
			unsigned long length, int iommu_prot,
			unsigned int flags)
{
	struct iopt_pages_list elm = {};
	LIST_HEAD(pages_list);
	int rc;

	elm.pages = iopt_alloc_pages(uptr, length, iommu_prot & IOMMU_WRITE);
	if (IS_ERR(elm.pages))
		return PTR_ERR(elm.pages);
	if (ictx->account_mode == IOPT_PAGES_ACCOUNT_MM &&
	    elm.pages->account_mode == IOPT_PAGES_ACCOUNT_USER)
		elm.pages->account_mode = IOPT_PAGES_ACCOUNT_MM;
	elm.start_byte = uptr - elm.pages->uptr;
	elm.length = length;
	list_add(&elm.next, &pages_list);

	rc = iopt_map_pages(iopt, &pages_list, length, iova, iommu_prot, flags);
	if (rc) {
		if (elm.area)
			iopt_abort_area(elm.area);
		if (elm.pages)
			iopt_put_pages(elm.pages);
		return rc;
	}
	return 0;
}

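/*
 * Usage sketch (hypothetical caller, error handling elided; SZ_16K is from
 * <linux/sizes.h>):
 *
 *	unsigned long iova, unmapped;
 *
 *	rc = iopt_map_user_pages(ictx, iopt, &iova, uptr, SZ_16K,
 *				 IOMMU_READ | IOMMU_WRITE, IOPT_ALLOC_IOVA);
 *	...
 *	rc = iopt_unmap_iova(iopt, iova, SZ_16K, &unmapped);
 */
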
struct iova_bitmap_fn_arg {
	unsigned long flags;
	struct io_pagetable *iopt;
	struct iommu_domain *domain;
	struct iommu_dirty_bitmap *dirty;
};

static int __iommu_read_and_clear_dirty(struct iova_bitmap *bitmap,
					unsigned long iova, size_t length,
					void *opaque)
{
	struct iopt_area *area;
	struct iopt_area_contig_iter iter;
	struct iova_bitmap_fn_arg *arg = opaque;
	struct iommu_domain *domain = arg->domain;
	struct iommu_dirty_bitmap *dirty = arg->dirty;
	const struct iommu_dirty_ops *ops = domain->dirty_ops;
	unsigned long last_iova = iova + length - 1;
	unsigned long flags = arg->flags;
	int ret;

	iopt_for_each_contig_area(&iter, area, arg->iopt, iova, last_iova) {
		unsigned long last = min(last_iova, iopt_area_last_iova(area));

		ret = ops->read_and_clear_dirty(domain, iter.cur_iova,
						last - iter.cur_iova + 1, flags,
						dirty);
		if (ret)
			return ret;
	}

	if (!iopt_area_contig_done(&iter))
		return -EINVAL;
	return 0;
}

static int
iommu_read_and_clear_dirty(struct iommu_domain *domain,
			   struct io_pagetable *iopt, unsigned long flags,
			   struct iommu_hwpt_get_dirty_bitmap *bitmap)
{
	const struct iommu_dirty_ops *ops = domain->dirty_ops;
	struct iommu_iotlb_gather gather;
	struct iommu_dirty_bitmap dirty;
	struct iova_bitmap_fn_arg arg;
	struct iova_bitmap *iter;
	int ret = 0;

	if (!ops || !ops->read_and_clear_dirty)
		return -EOPNOTSUPP;

	iter = iova_bitmap_alloc(bitmap->iova, bitmap->length,
				 bitmap->page_size,
				 u64_to_user_ptr(bitmap->data));
	if (IS_ERR(iter))
		return -ENOMEM;

	iommu_dirty_bitmap_init(&dirty, iter, &gather);

	arg.flags = flags;
	arg.iopt = iopt;
	arg.domain = domain;
	arg.dirty = &dirty;
	iova_bitmap_for_each(iter, &arg, __iommu_read_and_clear_dirty);

	if (!(flags & IOMMU_DIRTY_NO_CLEAR))
		iommu_iotlb_sync(domain, &gather);

	iova_bitmap_free(iter);

	return ret;
}

int iommufd_check_iova_range(struct io_pagetable *iopt,
			     struct iommu_hwpt_get_dirty_bitmap *bitmap)
{
	size_t iommu_pgsize = iopt->iova_alignment;
	u64 last_iova;

	if (check_add_overflow(bitmap->iova, bitmap->length - 1, &last_iova))
		return -EOVERFLOW;

	if (bitmap->iova > ULONG_MAX || last_iova > ULONG_MAX)
		return -EOVERFLOW;

	if ((bitmap->iova & (iommu_pgsize - 1)) ||
	    ((last_iova + 1) & (iommu_pgsize - 1)))
		return -EINVAL;

	if (!bitmap->page_size)
		return -EINVAL;

	if ((bitmap->iova & (bitmap->page_size - 1)) ||
	    ((last_iova + 1) & (bitmap->page_size - 1)))
		return -EINVAL;

	return 0;
}

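/*
 * Example (illustrative): with iova_alignment = 0x1000, a request of
 * iova = 0x100000, length = 0x200000, page_size = 0x1000 passes, while
 * length = 0x200800 fails because last_iova + 1 = 0x300800 is not 0x1000
 * aligned.
 */
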
int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt,
				   struct iommu_domain *domain,
				   unsigned long flags,
				   struct iommu_hwpt_get_dirty_bitmap *bitmap)
{
	int ret;

	ret = iommufd_check_iova_range(iopt, bitmap);
	if (ret)
		return ret;

	down_read(&iopt->iova_rwsem);
	ret = iommu_read_and_clear_dirty(domain, iopt, flags, bitmap);
	up_read(&iopt->iova_rwsem);

	return ret;
}

static int iopt_clear_dirty_data(struct io_pagetable *iopt,
				 struct iommu_domain *domain)
{
	const struct iommu_dirty_ops *ops = domain->dirty_ops;
	struct iommu_iotlb_gather gather;
	struct iommu_dirty_bitmap dirty;
	struct iopt_area *area;
	int ret = 0;

	lockdep_assert_held_read(&iopt->iova_rwsem);

	iommu_dirty_bitmap_init(&dirty, NULL, &gather);

	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		if (!area->pages)
			continue;

		ret = ops->read_and_clear_dirty(domain, iopt_area_iova(area),
						iopt_area_length(area), 0,
						&dirty);
		if (ret)
			break;
	}

	iommu_iotlb_sync(domain, &gather);
	return ret;
}

int iopt_set_dirty_tracking(struct io_pagetable *iopt,
			    struct iommu_domain *domain, bool enable)
{
	const struct iommu_dirty_ops *ops = domain->dirty_ops;
	int ret = 0;

	if (!ops)
		return -EOPNOTSUPP;

	down_read(&iopt->iova_rwsem);

	/* Clear dirty bits from PTEs to ensure a clean snapshot */
	if (enable) {
		ret = iopt_clear_dirty_data(iopt, domain);
		if (ret)
			goto out_unlock;
	}

	ret = ops->set_dirty_tracking(domain, enable);

out_unlock:
	up_read(&iopt->iova_rwsem);
	return ret;
}

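/*
 * Enabling first scrubs stale dirty bits with iopt_clear_dirty_data() so the
 * first bitmap read after enable reflects only DMA writes that happened
 * after this call, then flips the driver state via ops->set_dirty_tracking().
 */
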
int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova,
		   unsigned long length, struct list_head *pages_list)
{
	struct iopt_area_contig_iter iter;
	unsigned long last_iova;
	struct iopt_area *area;
	int rc;

	if (!length)
		return -EINVAL;
	if (check_add_overflow(iova, length - 1, &last_iova))
		return -EOVERFLOW;

	down_read(&iopt->iova_rwsem);
	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
		struct iopt_pages_list *elm;
		unsigned long last = min(last_iova, iopt_area_last_iova(area));

		elm = kzalloc(sizeof(*elm), GFP_KERNEL_ACCOUNT);
		if (!elm) {
			rc = -ENOMEM;
			goto err_free;
		}
		elm->start_byte = iopt_area_start_byte(area, iter.cur_iova);
		elm->pages = area->pages;
		elm->length = (last - iter.cur_iova) + 1;
		kref_get(&elm->pages->kref);
		list_add_tail(&elm->next, pages_list);
	}
	if (!iopt_area_contig_done(&iter)) {
		rc = -ENOENT;
		goto err_free;
	}
	up_read(&iopt->iova_rwsem);
	return 0;
err_free:
	up_read(&iopt->iova_rwsem);
	iopt_free_pages_list(pages_list);
	return rc;
}

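/*
 * On success every byte of [iova, iova + length) is covered by exactly one
 * iopt_pages_list element holding a kref on its iopt_pages. The caller
 * releases the list with iopt_free_pages_list() when it is done.
 */
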
static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
				 unsigned long last, unsigned long *unmapped)
{
	struct iopt_area *area;
	unsigned long unmapped_bytes = 0;
	unsigned int tries = 0;
	int rc = -ENOENT;

	/*
	 * The domains_rwsem must be held in read mode any time any area->pages
	 * is NULL. This prevents domain attach/detach from running
	 * concurrently with cleaning up the area.
	 */
again:
	down_read(&iopt->domains_rwsem);
	down_write(&iopt->iova_rwsem);
	while ((area = iopt_area_iter_first(iopt, start, last))) {
		unsigned long area_last = iopt_area_last_iova(area);
		unsigned long area_first = iopt_area_iova(area);
		struct iopt_pages *pages;

		/* Userspace should not race map/unmap's of the same area */
		if (!area->pages) {
			rc = -EBUSY;
			goto out_unlock_iova;
		}

		if (area_first < start || area_last > last) {
			rc = -ENOENT;
			goto out_unlock_iova;
		}

		if (area_first != start)
			tries = 0;

		/*
		 * num_accesses writers must hold the iova_rwsem too, so we can
		 * safely read it under the write side of the iova_rwsem
		 * without the pages->mutex.
		 */
		if (area->num_accesses) {
			size_t length = iopt_area_length(area);

			start = area_first;
			area->prevent_access = true;
			up_write(&iopt->iova_rwsem);
			up_read(&iopt->domains_rwsem);

			iommufd_access_notify_unmap(iopt, area_first, length);
			/* Something is not responding to unmap requests. */
			tries++;
			if (WARN_ON(tries > 100))
				return -EDEADLOCK;
			goto again;
		}

		pages = area->pages;
		area->pages = NULL;
		up_write(&iopt->iova_rwsem);

		iopt_area_unfill_domains(area, pages);
		iopt_abort_area(area);
		iopt_put_pages(pages);

		unmapped_bytes += area_last - area_first + 1;

		down_write(&iopt->iova_rwsem);
	}
	if (unmapped_bytes)
		rc = 0;

out_unlock_iova:
	up_write(&iopt->iova_rwsem);
	up_read(&iopt->domains_rwsem);
	if (unmapped)
		*unmapped = unmapped_bytes;
	return rc;
}

/**
 * iopt_unmap_iova() - Remove a range of iova
 * @iopt: io_pagetable to act on
 * @iova: Starting iova to unmap
 * @length: Number of bytes to unmap
 * @unmapped: Return number of bytes unmapped
 *
 * The requested range must be a superset of existing ranges.
 * Splitting/truncating IOVA mappings is not allowed.
 */
int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
		    unsigned long length, unsigned long *unmapped)
{
	unsigned long iova_last;

	if (!length)
		return -EINVAL;

	if (check_add_overflow(iova, length - 1, &iova_last))
		return -EOVERFLOW;

	return iopt_unmap_iova_range(iopt, iova, iova_last, unmapped);
}

int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped)
{
	int rc;

	rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
	/* If the IOVAs are empty then unmap all succeeds */
	if (rc == -ENOENT)
		return 0;
	return rc;
}

/* The caller must always free all the nodes in the allowed_iova rb_root. */
int iopt_set_allow_iova(struct io_pagetable *iopt,
			struct rb_root_cached *allowed_iova)
{
	struct iopt_allowed *allowed;

	down_write(&iopt->iova_rwsem);
	swap(*allowed_iova, iopt->allowed_itree);

	for (allowed = iopt_allowed_iter_first(iopt, 0, ULONG_MAX); allowed;
	     allowed = iopt_allowed_iter_next(allowed, 0, ULONG_MAX)) {
		if (iopt_reserved_iter_first(iopt, allowed->node.start,
					     allowed->node.last)) {
			swap(*allowed_iova, iopt->allowed_itree);
			up_write(&iopt->iova_rwsem);
			return -EADDRINUSE;
		}
	}

	up_write(&iopt->iova_rwsem);
	return 0;
}

int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start,
		      unsigned long last, void *owner)
{
	struct iopt_reserved *reserved;

	lockdep_assert_held_write(&iopt->iova_rwsem);

	if (iopt_area_iter_first(iopt, start, last) ||
	    iopt_allowed_iter_first(iopt, start, last))
		return -EADDRINUSE;

	reserved = kzalloc(sizeof(*reserved), GFP_KERNEL_ACCOUNT);
	if (!reserved)
		return -ENOMEM;
	reserved->node.start = start;
	reserved->node.last = last;
	reserved->owner = owner;
	interval_tree_insert(&reserved->node, &iopt->reserved_itree);
	return 0;
}

static void __iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner)
{
	struct iopt_reserved *reserved, *next;

	lockdep_assert_held_write(&iopt->iova_rwsem);

	for (reserved = iopt_reserved_iter_first(iopt, 0, ULONG_MAX); reserved;
	     reserved = next) {
		next = iopt_reserved_iter_next(reserved, 0, ULONG_MAX);

		if (reserved->owner == owner) {
			interval_tree_remove(&reserved->node,
					     &iopt->reserved_itree);
			kfree(reserved);
		}
	}
}

void iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner)
{
	down_write(&iopt->iova_rwsem);
	__iopt_remove_reserved_iova(iopt, owner);
	up_write(&iopt->iova_rwsem);
}

void iopt_init_table(struct io_pagetable *iopt)
{
	init_rwsem(&iopt->iova_rwsem);
	init_rwsem(&iopt->domains_rwsem);
	iopt->area_itree = RB_ROOT_CACHED;
	iopt->allowed_itree = RB_ROOT_CACHED;
	iopt->reserved_itree = RB_ROOT_CACHED;
	xa_init_flags(&iopt->domains, XA_FLAGS_ACCOUNT);
	xa_init_flags(&iopt->access_list, XA_FLAGS_ALLOC);

	/*
	 * iopt's start as SW tables that can use the entire size_t IOVA space
	 * due to the use of size_t in the APIs. They have no alignment
	 * restriction.
	 */
	iopt->iova_alignment = 1;
}

void iopt_destroy_table(struct io_pagetable *iopt)
{
	struct interval_tree_node *node;

	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
		iopt_remove_reserved_iova(iopt, NULL);

	while ((node = interval_tree_iter_first(&iopt->allowed_itree, 0,
						ULONG_MAX))) {
		interval_tree_remove(node, &iopt->allowed_itree);
		kfree(container_of(node, struct iopt_allowed, node));
	}

	WARN_ON(!RB_EMPTY_ROOT(&iopt->reserved_itree.rb_root));
	WARN_ON(!xa_empty(&iopt->domains));
	WARN_ON(!xa_empty(&iopt->access_list));
	WARN_ON(!RB_EMPTY_ROOT(&iopt->area_itree.rb_root));
}

/**
 * iopt_unfill_domain() - Unfill a domain with PFNs
 * @iopt: io_pagetable to act on
 * @domain: domain to unfill
 *
 * This is used when removing a domain from the iopt. Every area in the iopt
 * will be unmapped from the domain. The domain must already be removed from the
 * domains xarray.
 */
static void iopt_unfill_domain(struct io_pagetable *iopt,
			       struct iommu_domain *domain)
{
	struct iopt_area *area;

	lockdep_assert_held(&iopt->iova_rwsem);
	lockdep_assert_held_write(&iopt->domains_rwsem);

	/*
	 * Some other domain is holding all the pfns still, rapidly unmap this
	 * domain.
	 */
	if (iopt->next_domain_id != 0) {
		/* Pick an arbitrary remaining domain to act as storage */
		struct iommu_domain *storage_domain =
			xa_load(&iopt->domains, 0);

		for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
		     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
			struct iopt_pages *pages = area->pages;

			if (!pages)
				continue;

			mutex_lock(&pages->mutex);
			if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
				WARN_ON(!area->storage_domain);
			if (area->storage_domain == domain)
				area->storage_domain = storage_domain;
			mutex_unlock(&pages->mutex);

			iopt_area_unmap_domain(area, domain);
		}
		return;
	}

	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		struct iopt_pages *pages = area->pages;

		if (!pages)
			continue;

		mutex_lock(&pages->mutex);
		interval_tree_remove(&area->pages_node, &pages->domains_itree);
		WARN_ON(area->storage_domain != domain);
		area->storage_domain = NULL;
		iopt_area_unfill_domain(area, pages, domain);
		mutex_unlock(&pages->mutex);
	}
}

/**
 * iopt_fill_domain() - Fill a domain with PFNs
 * @iopt: io_pagetable to act on
 * @domain: domain to fill
 *
 * Fill the domain with PFNs from every area in the iopt. On failure the domain
 * is left unchanged.
 */
static int iopt_fill_domain(struct io_pagetable *iopt,
			    struct iommu_domain *domain)
{
	struct iopt_area *end_area;
	struct iopt_area *area;
	int rc;

	lockdep_assert_held(&iopt->iova_rwsem);
	lockdep_assert_held_write(&iopt->domains_rwsem);

	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		struct iopt_pages *pages = area->pages;

		if (!pages)
			continue;

		mutex_lock(&pages->mutex);
		rc = iopt_area_fill_domain(area, domain);
		if (rc) {
			mutex_unlock(&pages->mutex);
			goto out_unfill;
		}
		if (!area->storage_domain) {
			WARN_ON(iopt->next_domain_id != 0);
			area->storage_domain = domain;
			interval_tree_insert(&area->pages_node,
					     &pages->domains_itree);
		}
		mutex_unlock(&pages->mutex);
	}
	return 0;

out_unfill:
	end_area = area;
	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		struct iopt_pages *pages = area->pages;

		if (area == end_area)
			break;
		if (!pages)
			continue;
		mutex_lock(&pages->mutex);
		if (iopt->next_domain_id == 0) {
			interval_tree_remove(&area->pages_node,
					     &pages->domains_itree);
			area->storage_domain = NULL;
		}
		iopt_area_unfill_domain(area, pages, domain);
		mutex_unlock(&pages->mutex);
	}
	return rc;
}

/* All existing areas conform to an increased page size */
static int iopt_check_iova_alignment(struct io_pagetable *iopt,
				     unsigned long new_iova_alignment)
{
	unsigned long align_mask = new_iova_alignment - 1;
	struct iopt_area *area;

	lockdep_assert_held(&iopt->iova_rwsem);
	lockdep_assert_held(&iopt->domains_rwsem);

	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX))
		if ((iopt_area_iova(area) & align_mask) ||
		    (iopt_area_length(area) & align_mask) ||
		    (area->page_offset & align_mask))
			return -EADDRINUSE;

	if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) {
		struct iommufd_access *access;
		unsigned long index;

		xa_for_each(&iopt->access_list, index, access)
			if (WARN_ON(access->iova_alignment >
				    new_iova_alignment))
				return -EADDRINUSE;
	}
	return 0;
}

int iopt_table_add_domain(struct io_pagetable *iopt,
			  struct iommu_domain *domain)
{
	const struct iommu_domain_geometry *geometry = &domain->geometry;
	struct iommu_domain *iter_domain;
	unsigned int new_iova_alignment;
	unsigned long index;
	int rc;

	down_write(&iopt->domains_rwsem);
	down_write(&iopt->iova_rwsem);

	xa_for_each(&iopt->domains, index, iter_domain) {
		if (WARN_ON(iter_domain == domain)) {
			rc = -EEXIST;
			goto out_unlock;
		}
	}

	/*
	 * The io page size drives the iova_alignment. Internally the iopt_pages
	 * works in PAGE_SIZE units and we adjust when mapping sub-PAGE_SIZE
	 * objects into the iommu_domain.
	 *
	 * An iommu_domain must always be able to accept PAGE_SIZE to be
	 * compatible as we can't guarantee higher contiguity.
	 */
	new_iova_alignment = max_t(unsigned long,
				   1UL << __ffs(domain->pgsize_bitmap),
				   iopt->iova_alignment);
	if (new_iova_alignment > PAGE_SIZE) {
		rc = -EINVAL;
		goto out_unlock;
	}
	if (new_iova_alignment != iopt->iova_alignment) {
		rc = iopt_check_iova_alignment(iopt, new_iova_alignment);
		if (rc)
			goto out_unlock;
	}

	/* No area exists that is outside the allowed domain aperture */
	if (geometry->aperture_start != 0) {
		rc = iopt_reserve_iova(iopt, 0, geometry->aperture_start - 1,
				       domain);
		if (rc)
			goto out_reserved;
	}
	if (geometry->aperture_end != ULONG_MAX) {
		rc = iopt_reserve_iova(iopt, geometry->aperture_end + 1,
				       ULONG_MAX, domain);
		if (rc)
			goto out_reserved;
	}

	rc = xa_reserve(&iopt->domains, iopt->next_domain_id, GFP_KERNEL);
	if (rc)
		goto out_reserved;

	rc = iopt_fill_domain(iopt, domain);
	if (rc)
		goto out_release;

	iopt->iova_alignment = new_iova_alignment;
	xa_store(&iopt->domains, iopt->next_domain_id, domain, GFP_KERNEL);
	iopt->next_domain_id++;
	up_write(&iopt->iova_rwsem);
	up_write(&iopt->domains_rwsem);
	return 0;
out_release:
	xa_release(&iopt->domains, iopt->next_domain_id);
out_reserved:
	__iopt_remove_reserved_iova(iopt, domain);
out_unlock:
	up_write(&iopt->iova_rwsem);
	up_write(&iopt->domains_rwsem);
	return rc;
}

static int iopt_calculate_iova_alignment(struct io_pagetable *iopt)
{
	unsigned long new_iova_alignment;
	struct iommufd_access *access;
	struct iommu_domain *domain;
	unsigned long index;

	lockdep_assert_held_write(&iopt->iova_rwsem);
	lockdep_assert_held(&iopt->domains_rwsem);

	/* See batch_iommu_map_small() */
	if (iopt->disable_large_pages)
		new_iova_alignment = PAGE_SIZE;
	else
		new_iova_alignment = 1;

	xa_for_each(&iopt->domains, index, domain)
		new_iova_alignment = max_t(unsigned long,
					   1UL << __ffs(domain->pgsize_bitmap),
					   new_iova_alignment);
	xa_for_each(&iopt->access_list, index, access)
		new_iova_alignment = max_t(unsigned long,
					   access->iova_alignment,
					   new_iova_alignment);

	if (new_iova_alignment > iopt->iova_alignment) {
		int rc;

		rc = iopt_check_iova_alignment(iopt, new_iova_alignment);
		if (rc)
			return rc;
	}
	iopt->iova_alignment = new_iova_alignment;
	return 0;
}

void iopt_table_remove_domain(struct io_pagetable *iopt,
			      struct iommu_domain *domain)
{
	struct iommu_domain *iter_domain = NULL;
	unsigned long index;

	down_write(&iopt->domains_rwsem);
	down_write(&iopt->iova_rwsem);

	xa_for_each(&iopt->domains, index, iter_domain)
		if (iter_domain == domain)
			break;
	if (WARN_ON(iter_domain != domain) || index >= iopt->next_domain_id)
		goto out_unlock;

	/*
	 * Compress the xarray to keep it linear by swapping the entry to erase
	 * with the tail entry and shrinking the tail.
	 */
	iopt->next_domain_id--;
	iter_domain = xa_erase(&iopt->domains, iopt->next_domain_id);
	if (index != iopt->next_domain_id)
		xa_store(&iopt->domains, index, iter_domain, GFP_KERNEL);

	iopt_unfill_domain(iopt, domain);
	__iopt_remove_reserved_iova(iopt, domain);

	WARN_ON(iopt_calculate_iova_alignment(iopt));
out_unlock:
	up_write(&iopt->iova_rwsem);
	up_write(&iopt->domains_rwsem);
}

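/*
 * Compression example (illustrative): with domains [A, B, C] at indexes 0..2,
 * removing B erases index 2 (C) and stores C at index 1, leaving the packed
 * array [A, C] with next_domain_id = 2. iopt_unfill_domain() relies on this
 * packing when it picks index 0 as the replacement storage domain.
 */
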
/**
 * iopt_area_split - Split an area into two parts at iova
 * @area: The area to split
 * @iova: Becomes the last of a new area
 *
 * This splits an area into two. It is part of the VFIO compatibility to allow
 * poking a hole in the mapping. The two areas continue to point at the same
 * iopt_pages, just with different starting bytes.
 */
static int iopt_area_split(struct iopt_area *area, unsigned long iova)
{
	unsigned long alignment = area->iopt->iova_alignment;
	unsigned long last_iova = iopt_area_last_iova(area);
	unsigned long start_iova = iopt_area_iova(area);
	unsigned long new_start = iova + 1;
	struct io_pagetable *iopt = area->iopt;
	struct iopt_pages *pages = area->pages;
	struct iopt_area *lhs;
	struct iopt_area *rhs;
	int rc;

	lockdep_assert_held_write(&iopt->iova_rwsem);

	if (iova == start_iova || iova == last_iova)
		return 0;

	if (!pages || area->prevent_access)
		return -EBUSY;

	if (new_start & (alignment - 1) ||
	    iopt_area_start_byte(area, new_start) & (alignment - 1))
		return -EINVAL;

	lhs = iopt_area_alloc();
	if (!lhs)
		return -ENOMEM;

	rhs = iopt_area_alloc();
	if (!rhs) {
		rc = -ENOMEM;
		goto err_free_lhs;
	}

	mutex_lock(&pages->mutex);
	/*
	 * Splitting is not permitted if an access exists, we don't track enough
	 * information to split existing accesses.
	 */
	if (area->num_accesses) {
		rc = -EINVAL;
		goto err_unlock;
	}

	/*
	 * Splitting is not permitted if a domain could have been mapped with
	 * huge pages.
	 */
	if (area->storage_domain && !iopt->disable_large_pages) {
		rc = -EINVAL;
		goto err_unlock;
	}

	interval_tree_remove(&area->node, &iopt->area_itree);
	rc = iopt_insert_area(iopt, lhs, area->pages, start_iova,
			      iopt_area_start_byte(area, start_iova),
			      (new_start - 1) - start_iova + 1,
			      area->iommu_prot);
	if (rc)
		goto err_insert;

	rc = iopt_insert_area(iopt, rhs, area->pages, new_start,
			      iopt_area_start_byte(area, new_start),
			      last_iova - new_start + 1, area->iommu_prot);
	if (rc)
		goto err_remove_lhs;

	/*
	 * If the original area has filled a domain, domains_itree has to be
	 * updated.
	 */
	if (area->storage_domain) {
		interval_tree_remove(&area->pages_node, &pages->domains_itree);
		interval_tree_insert(&lhs->pages_node, &pages->domains_itree);
		interval_tree_insert(&rhs->pages_node, &pages->domains_itree);
	}

	lhs->storage_domain = area->storage_domain;
	lhs->pages = area->pages;
	rhs->storage_domain = area->storage_domain;
	rhs->pages = area->pages;
	kref_get(&rhs->pages->kref);
	kfree(area);
	mutex_unlock(&pages->mutex);

	/*
	 * No change to domains or accesses because the iopt_pages has not been
	 * unpinned or unmapped.
	 */
	return 0;

err_remove_lhs:
	interval_tree_remove(&lhs->node, &iopt->area_itree);
err_insert:
	interval_tree_insert(&area->node, &iopt->area_itree);
err_unlock:
	mutex_unlock(&pages->mutex);
	kfree(rhs);
err_free_lhs:
	kfree(lhs);
	return rc;
}

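/*
 * Split example (illustrative): an area covering IOVA [0x100000, 0x104fff]
 * cut at iova = 0x101fff becomes lhs [0x100000, 0x101fff] and rhs
 * [0x102000, 0x104fff]. Both share the same iopt_pages; rhs starts at the
 * original start_byte plus 0x2000.
 */
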
int iopt_cut_iova(struct io_pagetable *iopt, unsigned long *iovas,
		  size_t num_iovas)
{
	int rc = 0;
	int i;

	down_write(&iopt->iova_rwsem);
	for (i = 0; i < num_iovas; i++) {
		struct iopt_area *area;

		area = iopt_area_iter_first(iopt, iovas[i], iovas[i]);
		if (!area)
			continue;
		rc = iopt_area_split(area, iovas[i]);
		if (rc)
			break;
	}
	up_write(&iopt->iova_rwsem);
	return rc;
}

void iopt_enable_large_pages(struct io_pagetable *iopt)
{
	int rc;

	down_write(&iopt->domains_rwsem);
	down_write(&iopt->iova_rwsem);
	WRITE_ONCE(iopt->disable_large_pages, false);
	rc = iopt_calculate_iova_alignment(iopt);
	WARN_ON(rc);
	up_write(&iopt->iova_rwsem);
	up_write(&iopt->domains_rwsem);
}

int iopt_disable_large_pages(struct io_pagetable *iopt)
{
	int rc = 0;

	down_write(&iopt->domains_rwsem);
	down_write(&iopt->iova_rwsem);
	if (iopt->disable_large_pages)
		goto out_unlock;

	/* Won't do it if domains already have pages mapped in them */
	if (!xa_empty(&iopt->domains) &&
	    !RB_EMPTY_ROOT(&iopt->area_itree.rb_root)) {
		rc = -EINVAL;
		goto out_unlock;
	}

	WRITE_ONCE(iopt->disable_large_pages, true);
	rc = iopt_calculate_iova_alignment(iopt);
	if (rc)
		WRITE_ONCE(iopt->disable_large_pages, false);
out_unlock:
	up_write(&iopt->iova_rwsem);
	up_write(&iopt->domains_rwsem);
	return rc;
}

int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access)
{
	u32 new_id;
	int rc;

	down_write(&iopt->domains_rwsem);
	down_write(&iopt->iova_rwsem);
	rc = xa_alloc(&iopt->access_list, &new_id, access, xa_limit_16b,
		      GFP_KERNEL_ACCOUNT);
	if (rc)
		goto out_unlock;

	rc = iopt_calculate_iova_alignment(iopt);
	if (rc) {
		xa_erase(&iopt->access_list, new_id);
		goto out_unlock;
	}
	access->iopt_access_list_id = new_id;

out_unlock:
	up_write(&iopt->iova_rwsem);
	up_write(&iopt->domains_rwsem);
	return rc;
}

void iopt_remove_access(struct io_pagetable *iopt,
			struct iommufd_access *access,
			u32 iopt_access_list_id)
{
	down_write(&iopt->domains_rwsem);
	down_write(&iopt->iova_rwsem);
	WARN_ON(xa_erase(&iopt->access_list, iopt_access_list_id) != access);
	WARN_ON(iopt_calculate_iova_alignment(iopt));
	up_write(&iopt->iova_rwsem);
	up_write(&iopt->domains_rwsem);
}

/* Narrow the valid_iova_itree to include reserved ranges from a device. */
int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
					struct device *dev,
					phys_addr_t *sw_msi_start)
{
	struct iommu_resv_region *resv;
	LIST_HEAD(resv_regions);
	unsigned int num_hw_msi = 0;
	unsigned int num_sw_msi = 0;
	int rc;

	if (iommufd_should_fail())
		return -EINVAL;

	down_write(&iopt->iova_rwsem);
	/* FIXME: drivers allocate memory but there is no failure propagated */
	iommu_get_resv_regions(dev, &resv_regions);

	list_for_each_entry(resv, &resv_regions, list) {
		if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		if (sw_msi_start && resv->type == IOMMU_RESV_MSI)
			num_hw_msi++;
		if (sw_msi_start && resv->type == IOMMU_RESV_SW_MSI) {
			*sw_msi_start = resv->start;
			num_sw_msi++;
		}

		rc = iopt_reserve_iova(iopt, resv->start,
				       resv->length - 1 + resv->start, dev);
		if (rc)
			goto out_reserved;
	}

	/* Drivers must offer sane combinations of regions */
	if (WARN_ON(num_sw_msi && num_hw_msi) || WARN_ON(num_sw_msi > 1)) {
		rc = -EINVAL;
		goto out_reserved;
	}

	rc = 0;
	goto out_free_resv;

out_reserved:
	__iopt_remove_reserved_iova(iopt, dev);
out_free_resv:
	iommu_put_resv_regions(dev, &resv_regions);
	up_write(&iopt->iova_rwsem);
	return rc;
}