1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
16 #include <linux/delay.h>
17 #include <linux/kthread.h>
19 #include <linux/swap.h>
21 #include "xe_assert.h"
23 #include "xe_device.h"
24 #include "xe_drm_client.h"
25 #include "xe_exec_queue.h"
27 #include "xe_gt_pagefault.h"
28 #include "xe_gt_tlb_invalidation.h"
29 #include "xe_migrate.h"
32 #include "xe_preempt_fence.h"
34 #include "xe_res_cursor.h"
37 #include "generated/xe_wa_oob.h"
40 #define TEST_VM_ASYNC_OPS_ERROR
42 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
44 return vm->gpuvm.r_obj;
48 * xe_vma_userptr_check_repin() - Advisory check for repin needed
49 * @vma: The userptr vma
51 * Check if the userptr vma has been invalidated since last successful
52 * repin. The check is advisory only and the function can be called
53 * without the vm->userptr.notifier_lock held. There is no guarantee that the
54 * vma userptr will remain valid after a lockless check, so typically
55 * the call needs to be followed by a proper check under the notifier_lock.
57 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
59 int xe_vma_userptr_check_repin(struct xe_vma *vma)
61 return mmu_interval_check_retry(&vma->userptr.notifier,
62 vma->userptr.notifier_seq) ?
66 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
68 struct xe_vm *vm = xe_vma_vm(vma);
69 struct xe_device *xe = vm->xe;
70 const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
72 bool in_kthread = !current->mm;
73 unsigned long notifier_seq;
75 bool read_only = xe_vma_read_only(vma);
77 lockdep_assert_held(&vm->lock);
78 xe_assert(xe, xe_vma_is_userptr(vma));
80 if (vma->gpuva.flags & XE_VMA_DESTROYED)
83 notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
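/*
 * If the notifier sequence number is unchanged since the last successful
 * pin, the existing mapping is still valid and repinning can be skipped.
 */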
84 if (notifier_seq == vma->userptr.notifier_seq)
87 pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
91 if (vma->userptr.sg) {
92 dma_unmap_sgtable(xe->drm.dev,
94 read_only ? DMA_TO_DEVICE :
95 DMA_BIDIRECTIONAL, 0);
96 sg_free_table(vma->userptr.sg);
97 vma->userptr.sg = NULL;
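/*
 * When called from the rebind worker (a kthread with no current->mm),
 * take a reference on the notifier's mm and temporarily adopt it so
 * get_user_pages_fast() can walk the user address space.
 */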
102 if (!mmget_not_zero(vma->userptr.notifier.mm)) {
106 kthread_use_mm(vma->userptr.notifier.mm);
109 while (pinned < num_pages) {
110 ret = get_user_pages_fast(xe_vma_userptr(vma) +
113 read_only ? 0 : FOLL_WRITE,
126 kthread_unuse_mm(vma->userptr.notifier.mm);
127 mmput(vma->userptr.notifier.mm);
133 ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
135 (u64)pinned << PAGE_SHIFT,
136 xe_sg_segment_size(xe->drm.dev),
139 vma->userptr.sg = NULL;
142 vma->userptr.sg = &vma->userptr.sgt;
144 ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
145 read_only ? DMA_TO_DEVICE :
147 DMA_ATTR_SKIP_CPU_SYNC |
148 DMA_ATTR_NO_KERNEL_MAPPING);
150 sg_free_table(vma->userptr.sg);
151 vma->userptr.sg = NULL;
155 for (i = 0; i < pinned; ++i) {
158 set_page_dirty(pages[i]);
159 unlock_page(pages[i]);
162 mark_page_accessed(pages[i]);
166 release_pages(pages, pinned);
170 vma->userptr.notifier_seq = notifier_seq;
171 if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
175 return ret < 0 ? ret : 0;
178 static bool preempt_fences_waiting(struct xe_vm *vm)
180 struct xe_exec_queue *q;
182 lockdep_assert_held(&vm->lock);
183 xe_vm_assert_held(vm);
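/*
 * A queue counts as waiting if it has no preempt fence or if its fence
 * already has signaling enabled, i.e. preemption has been triggered.
 */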
185 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
186 if (!q->compute.pfence ||
187 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
188 &q->compute.pfence->flags)) {
196 static void free_preempt_fences(struct list_head *list)
198 struct list_head *link, *next;
200 list_for_each_safe(link, next, list)
201 xe_preempt_fence_free(to_preempt_fence_from_link(link));
204 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
207 lockdep_assert_held(&vm->lock);
208 xe_vm_assert_held(vm);
210 if (*count >= vm->preempt.num_exec_queues)
213 for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
214 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
217 return PTR_ERR(pfence);
219 list_move_tail(xe_preempt_fence_link(pfence), list);
225 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
227 struct xe_exec_queue *q;
229 xe_vm_assert_held(vm);
231 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
232 if (q->compute.pfence) {
233 long timeout = dma_fence_wait(q->compute.pfence, false);
237 dma_fence_put(q->compute.pfence);
238 q->compute.pfence = NULL;
245 static bool xe_vm_is_idle(struct xe_vm *vm)
247 struct xe_exec_queue *q;
249 xe_vm_assert_held(vm);
250 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
251 if (!xe_exec_queue_is_idle(q))
258 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
260 struct list_head *link;
261 struct xe_exec_queue *q;
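/*
 * Consume one pre-allocated fence from @list per exec queue and install
 * it as the queue's new preempt fence.
 */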
263 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
264 struct dma_fence *fence;
267 xe_assert(vm->xe, link != list);
269 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
270 q, q->compute.context,
272 dma_fence_put(q->compute.pfence);
273 q->compute.pfence = fence;
277 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
279 struct xe_exec_queue *q;
282 if (!vm->preempt.num_exec_queues)
285 err = xe_bo_lock(bo, true);
289 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
293 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
294 if (q->compute.pfence) {
295 dma_resv_add_fence(bo->ttm.base.resv,
297 DMA_RESV_USAGE_BOOKKEEP);
305 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
306 struct drm_exec *exec)
308 struct xe_exec_queue *q;
310 lockdep_assert_held(&vm->lock);
311 xe_vm_assert_held(vm);
313 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
316 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
317 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
321 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
323 struct drm_gpuvm_exec vm_exec = {
325 .flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
328 struct drm_exec *exec = &vm_exec.exec;
329 struct dma_fence *pfence;
333 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
335 down_write(&vm->lock);
336 err = drm_gpuvm_exec_lock(&vm_exec);
340 pfence = xe_preempt_fence_create(q, q->compute.context,
347 list_add(&q->compute.link, &vm->preempt.exec_queues);
348 ++vm->preempt.num_exec_queues;
349 q->compute.pfence = pfence;
351 down_read(&vm->userptr.notifier_lock);
353 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
354 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
357 * Check whether a preemption on the VM or a userptr invalidation is
358 * in flight; if so, trigger this preempt fence to sync state with the
359 * other preempt fences on the VM.
361 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
363 dma_fence_enable_sw_signaling(pfence);
365 up_read(&vm->userptr.notifier_lock);
376 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
380 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
382 if (!xe_vm_in_preempt_fence_mode(vm))
385 down_write(&vm->lock);
386 list_del(&q->compute.link);
387 --vm->preempt.num_exec_queues;
388 if (q->compute.pfence) {
389 dma_fence_enable_sw_signaling(q->compute.pfence);
390 dma_fence_put(q->compute.pfence);
391 q->compute.pfence = NULL;
397 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
398 * that need repinning.
401 * This function checks for whether the VM has userptrs that need repinning,
402 * and provides a release-type barrier on the userptr.notifier_lock after
405 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
407 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
409 lockdep_assert_held_read(&vm->userptr.notifier_lock);
411 return (list_empty(&vm->userptr.repin_list) &&
412 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
415 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
417 static void xe_vm_kill(struct xe_vm *vm)
419 struct xe_exec_queue *q;
421 lockdep_assert_held(&vm->lock);
423 xe_vm_lock(vm, false);
424 vm->flags |= XE_VM_FLAG_BANNED;
425 trace_xe_vm_kill(vm);
427 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
431 /* TODO: Inform user the VM is banned */
435 * xe_vm_validate_should_retry() - Whether to retry after a validate error.
436 * @exec: The drm_exec object used for locking before validation.
437 * @err: The error returned from ttm_bo_validate().
438 * @end: A ktime_t cookie that should be set to 0 before first use and
439 * that should be reused on subsequent calls.
441 * With multiple active VMs, under memory pressure, it is possible that
442 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
443 * Until ttm properly handles locking in such scenarios, the best thing the
444 * driver can do is retry with a timeout. Check if that is necessary, and
445 * if so unlock the drm_exec's objects while keeping the ticket to prepare
448 * Return: true if a retry after drm_exec_init() is recommended;
451 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
459 *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
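/* The first call arms the timeout; once the deadline has passed, stop retrying. */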
460 if (!ktime_before(cur, *end))
467 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
469 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
470 struct drm_gpuva *gpuva;
473 lockdep_assert_held(&vm->lock);
474 drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
475 list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
478 ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
482 vm_bo->evicted = false;
486 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
492 * 1 fence for each preempt fence plus a fence for each tile from a
495 err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
496 vm->xe->info.tile_count);
500 if (xe_vm_is_idle(vm)) {
501 vm->preempt.rebind_deactivated = true;
506 if (!preempt_fences_waiting(vm)) {
511 err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
515 err = wait_for_existing_preempt_fences(vm);
519 return drm_gpuvm_validate(&vm->gpuvm, exec);
522 static void preempt_rebind_work_func(struct work_struct *w)
524 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
525 struct drm_exec exec;
526 struct dma_fence *rebind_fence;
527 unsigned int fence_count = 0;
528 LIST_HEAD(preempt_fences);
532 int __maybe_unused tries = 0;
534 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
535 trace_xe_vm_rebind_worker_enter(vm);
537 down_write(&vm->lock);
539 if (xe_vm_is_closed_or_banned(vm)) {
541 trace_xe_vm_rebind_worker_exit(vm);
546 if (xe_vm_userptr_check_repin(vm)) {
547 err = xe_vm_userptr_pin(vm);
549 goto out_unlock_outer;
552 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
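/*
 * drm_exec_until_all_locked() re-runs the block below on ww-mutex
 * contention until all needed objects are locked under one acquire
 * context.
 */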
554 drm_exec_until_all_locked(&exec) {
557 err = xe_preempt_work_begin(&exec, vm, &done);
558 drm_exec_retry_on_contention(&exec);
560 drm_exec_fini(&exec);
561 if (err && xe_vm_validate_should_retry(&exec, err, &end))
564 goto out_unlock_outer;
568 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
572 rebind_fence = xe_vm_rebind(vm, true);
573 if (IS_ERR(rebind_fence)) {
574 err = PTR_ERR(rebind_fence);
579 dma_fence_wait(rebind_fence, false);
580 dma_fence_put(rebind_fence);
583 /* Wait on munmap style VM unbinds */
584 wait = dma_resv_wait_timeout(xe_vm_resv(vm),
585 DMA_RESV_USAGE_KERNEL,
586 false, MAX_SCHEDULE_TIMEOUT);
592 #define retry_required(__tries, __vm) \
593 (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
594 (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
595 __xe_vm_userptr_needs_repin(__vm))
597 down_read(&vm->userptr.notifier_lock);
598 if (retry_required(tries, vm)) {
599 up_read(&vm->userptr.notifier_lock);
604 #undef retry_required
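/*
 * Move this VM's BOs to the tail of the TTM LRU as a group so they are
 * considered for eviction last.
 */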
606 spin_lock(&vm->xe->ttm.lru_lock);
607 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
608 spin_unlock(&vm->xe->ttm.lru_lock);
610 /* Point of no return. */
611 arm_preempt_fences(vm, &preempt_fences);
612 resume_and_reinstall_preempt_fences(vm, &exec);
613 up_read(&vm->userptr.notifier_lock);
616 drm_exec_fini(&exec);
618 if (err == -EAGAIN) {
619 trace_xe_vm_rebind_worker_retry(vm);
624 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
629 free_preempt_fences(&preempt_fences);
631 trace_xe_vm_rebind_worker_exit(vm);
634 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
635 const struct mmu_notifier_range *range,
636 unsigned long cur_seq)
638 struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
639 struct xe_vm *vm = xe_vma_vm(vma);
640 struct dma_resv_iter cursor;
641 struct dma_fence *fence;
644 xe_assert(vm->xe, xe_vma_is_userptr(vma));
645 trace_xe_vma_userptr_invalidate(vma);
647 if (!mmu_notifier_range_blockable(range))
650 down_write(&vm->userptr.notifier_lock);
651 mmu_interval_set_seq(mni, cur_seq);
653 /* No need to stop gpu access if the userptr is not yet bound. */
654 if (!vma->userptr.initial_bind) {
655 up_write(&vm->userptr.notifier_lock);
660 * Tell exec and rebind worker they need to repin and rebind this
663 if (!xe_vm_in_fault_mode(vm) &&
664 !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
665 spin_lock(&vm->userptr.invalidated_lock);
666 list_move_tail(&vma->userptr.invalidate_link,
667 &vm->userptr.invalidated);
668 spin_unlock(&vm->userptr.invalidated_lock);
671 up_write(&vm->userptr.notifier_lock);
674 * Preempt fences turn into schedule disables, pipeline these.
675 * Note that even in fault mode, we need to wait for binds and
676 * unbinds to complete, and those are attached as BOOKKEEP fences
679 dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
680 DMA_RESV_USAGE_BOOKKEEP);
681 dma_resv_for_each_fence_unlocked(&cursor, fence)
682 dma_fence_enable_sw_signaling(fence);
683 dma_resv_iter_end(&cursor);
685 err = dma_resv_wait_timeout(xe_vm_resv(vm),
686 DMA_RESV_USAGE_BOOKKEEP,
687 false, MAX_SCHEDULE_TIMEOUT);
688 XE_WARN_ON(err <= 0);
690 if (xe_vm_in_fault_mode(vm)) {
691 err = xe_vm_invalidate_vma(vma);
695 trace_xe_vma_userptr_invalidate_complete(vma);
700 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
701 .invalidate = vma_userptr_invalidate,
704 int xe_vm_userptr_pin(struct xe_vm *vm)
706 struct xe_vma *vma, *next;
708 LIST_HEAD(tmp_evict);
710 lockdep_assert_held_write(&vm->lock);
712 /* Collect invalidated userptrs */
713 spin_lock(&vm->userptr.invalidated_lock);
714 list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
715 userptr.invalidate_link) {
716 list_del_init(&vma->userptr.invalidate_link);
717 list_move_tail(&vma->combined_links.userptr,
718 &vm->userptr.repin_list);
720 spin_unlock(&vm->userptr.invalidated_lock);
722 /* Pin and move to temporary list */
723 list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
724 combined_links.userptr) {
725 err = xe_vma_userptr_pin_pages(vma);
729 list_move_tail(&vma->combined_links.userptr, &vm->rebind_list);
736 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
737 * that need repinning.
740 * This function does an advisory check for whether the VM has userptrs that
743 * Return: 0 if there are no indications of userptrs needing repinning,
744 * -EAGAIN if there are.
746 int xe_vm_userptr_check_repin(struct xe_vm *vm)
748 return (list_empty_careful(&vm->userptr.repin_list) &&
749 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
752 static struct dma_fence *
753 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
754 struct xe_sync_entry *syncs, u32 num_syncs,
755 bool first_op, bool last_op);
757 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
759 struct dma_fence *fence = NULL;
760 struct xe_vma *vma, *next;
762 lockdep_assert_held(&vm->lock);
763 if (xe_vm_in_lr_mode(vm) && !rebind_worker)
766 xe_vm_assert_held(vm);
767 list_for_each_entry_safe(vma, next, &vm->rebind_list,
768 combined_links.rebind) {
769 xe_assert(vm->xe, vma->tile_present);
771 list_del_init(&vma->combined_links.rebind);
772 dma_fence_put(fence);
774 trace_xe_vma_rebind_worker(vma);
776 trace_xe_vma_rebind_exec(vma);
777 fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
785 #define VMA_CREATE_FLAG_READ_ONLY BIT(0)
786 #define VMA_CREATE_FLAG_IS_NULL BIT(1)
788 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
790 u64 bo_offset_or_userptr,
792 u16 pat_index, unsigned int flags)
795 struct xe_tile *tile;
797 bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
798 bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
800 xe_assert(vm->xe, start < end);
801 xe_assert(vm->xe, end < vm->size);
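/*
 * Only userptr VMAs need the trailing struct xe_userptr; BO-backed and
 * NULL mappings are allocated without it.
 */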
803 if (!bo && !is_null) /* userptr */
804 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
806 vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
809 vma = ERR_PTR(-ENOMEM);
813 INIT_LIST_HEAD(&vma->combined_links.rebind);
815 INIT_LIST_HEAD(&vma->gpuva.gem.entry);
816 vma->gpuva.vm = &vm->gpuvm;
817 vma->gpuva.va.addr = start;
818 vma->gpuva.va.range = end - start + 1;
820 vma->gpuva.flags |= XE_VMA_READ_ONLY;
822 vma->gpuva.flags |= DRM_GPUVA_SPARSE;
824 for_each_tile(tile, vm->xe, id)
825 vma->tile_mask |= 0x1 << id;
827 if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
828 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
830 vma->pat_index = pat_index;
833 struct drm_gpuvm_bo *vm_bo;
835 xe_bo_assert_held(bo);
837 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
840 return ERR_CAST(vm_bo);
843 drm_gpuvm_bo_extobj_add(vm_bo);
844 drm_gem_object_get(&bo->ttm.base);
845 vma->gpuva.gem.obj = &bo->ttm.base;
846 vma->gpuva.gem.offset = bo_offset_or_userptr;
847 drm_gpuva_link(&vma->gpuva, vm_bo);
848 drm_gpuvm_bo_put(vm_bo);
849 } else /* userptr or null */ {
851 u64 size = end - start + 1;
854 INIT_LIST_HEAD(&vma->userptr.invalidate_link);
855 vma->gpuva.gem.offset = bo_offset_or_userptr;
857 err = mmu_interval_notifier_insert(&vma->userptr.notifier,
859 xe_vma_userptr(vma), size,
860 &vma_userptr_notifier_ops);
867 vma->userptr.notifier_seq = LONG_MAX;
876 static void xe_vma_destroy_late(struct xe_vma *vma)
878 struct xe_vm *vm = xe_vma_vm(vma);
879 struct xe_device *xe = vm->xe;
880 bool read_only = xe_vma_read_only(vma);
882 if (xe_vma_is_userptr(vma)) {
883 if (vma->userptr.sg) {
884 dma_unmap_sgtable(xe->drm.dev,
886 read_only ? DMA_TO_DEVICE :
887 DMA_BIDIRECTIONAL, 0);
888 sg_free_table(vma->userptr.sg);
889 vma->userptr.sg = NULL;
893 * Since userptr pages are not pinned, we can't remove
894 * the notifier until we're sure the GPU is not accessing
897 mmu_interval_notifier_remove(&vma->userptr.notifier);
899 } else if (xe_vma_is_null(vma)) {
902 xe_bo_put(xe_vma_bo(vma));
908 static void vma_destroy_work_func(struct work_struct *w)
911 container_of(w, struct xe_vma, destroy_work);
913 xe_vma_destroy_late(vma);
916 static void vma_destroy_cb(struct dma_fence *fence,
917 struct dma_fence_cb *cb)
919 struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
921 INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
922 queue_work(system_unbound_wq, &vma->destroy_work);
925 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
927 struct xe_vm *vm = xe_vma_vm(vma);
929 lockdep_assert_held_write(&vm->lock);
930 xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
932 if (xe_vma_is_userptr(vma)) {
933 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
935 spin_lock(&vm->userptr.invalidated_lock);
936 list_del(&vma->userptr.invalidate_link);
937 spin_unlock(&vm->userptr.invalidated_lock);
938 } else if (!xe_vma_is_null(vma)) {
939 xe_bo_assert_held(xe_vma_bo(vma));
941 drm_gpuva_unlink(&vma->gpuva);
944 xe_vm_assert_held(vm);
946 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
950 XE_WARN_ON(ret != -ENOENT);
951 xe_vma_destroy_late(vma);
954 xe_vma_destroy_late(vma);
959 * xe_vm_prepare_vma() - drm_exec utility to lock a vma
960 * @exec: The drm_exec object we're currently locking for.
961 * @vma: The vma for which we want to lock the vm resv and any attached
963 * @num_shared: The number of dma-fence slots to pre-allocate in the
964 * objects' reservation objects.
966 * Return: 0 on success, negative error code on error. In particular
967 * may return -EDEADLK on WW transaction contention and -EINTR if
968 * an interruptible wait is terminated by a signal.
970 int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
971 unsigned int num_shared)
973 struct xe_vm *vm = xe_vma_vm(vma);
974 struct xe_bo *bo = xe_vma_bo(vma);
978 err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
979 if (!err && bo && !bo->vm)
980 err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
985 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
987 struct drm_exec exec;
990 drm_exec_init(&exec, 0, 0);
991 drm_exec_until_all_locked(&exec) {
992 err = xe_vm_prepare_vma(&exec, vma, 0);
993 drm_exec_retry_on_contention(&exec);
998 xe_vma_destroy(vma, NULL);
1000 drm_exec_fini(&exec);
1004 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1006 struct drm_gpuva *gpuva;
1008 lockdep_assert_held(&vm->lock);
1010 if (xe_vm_is_closed_or_banned(vm))
1013 xe_assert(vm->xe, start + range <= vm->size);
1015 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1017 return gpuva ? gpuva_to_vma(gpuva) : NULL;
1020 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1024 xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1025 lockdep_assert_held(&vm->lock);
1027 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1028 XE_WARN_ON(err); /* Shouldn't be possible */
1033 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1035 xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1036 lockdep_assert_held(&vm->lock);
1038 drm_gpuva_remove(&vma->gpuva);
1039 if (vm->usm.last_fault_vma == vma)
1040 vm->usm.last_fault_vma = NULL;
1043 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1045 struct xe_vma_op *op;
1047 op = kzalloc(sizeof(*op), GFP_KERNEL);
1055 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1057 static const struct drm_gpuvm_ops gpuvm_ops = {
1058 .op_alloc = xe_vm_op_alloc,
1059 .vm_bo_validate = xe_gpuvm_validate,
1060 .vm_free = xe_vm_free,
1063 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1067 if (pat_index & BIT(0))
1068 pte |= XE_PPGTT_PTE_PAT0;
1070 if (pat_index & BIT(1))
1071 pte |= XE_PPGTT_PTE_PAT1;
1076 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1081 if (pat_index & BIT(0))
1082 pte |= XE_PPGTT_PTE_PAT0;
1084 if (pat_index & BIT(1))
1085 pte |= XE_PPGTT_PTE_PAT1;
1087 if (pat_index & BIT(2)) {
1089 pte |= XE_PPGTT_PDE_PDPE_PAT2;
1091 pte |= XE_PPGTT_PTE_PAT2;
1094 if (pat_index & BIT(3))
1095 pte |= XELPG_PPGTT_PTE_PAT3;
1097 if (pat_index & (BIT(4)))
1098 pte |= XE2_PPGTT_PTE_PAT4;
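/*
 * Encode the page-size bits for huge entries: level 1 maps 2M pages,
 * level 2 maps 1G pages.
 */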
1103 static u64 pte_encode_ps(u32 pt_level)
1105 XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1108 return XE_PDE_PS_2M;
1109 else if (pt_level == 2)
1110 return XE_PDPE_PS_1G;
1115 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1116 const u16 pat_index)
1118 struct xe_device *xe = xe_bo_device(bo);
1121 pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1122 pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1123 pde |= pde_encode_pat_index(xe, pat_index);
1128 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1129 u16 pat_index, u32 pt_level)
1131 struct xe_device *xe = xe_bo_device(bo);
1134 pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1135 pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1136 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1137 pte |= pte_encode_ps(pt_level);
1139 if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1140 pte |= XE_PPGTT_PTE_DM;
1145 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1146 u16 pat_index, u32 pt_level)
1148 struct xe_device *xe = xe_vma_vm(vma)->xe;
1150 pte |= XE_PAGE_PRESENT;
1152 if (likely(!xe_vma_read_only(vma)))
1155 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1156 pte |= pte_encode_ps(pt_level);
1158 if (unlikely(xe_vma_is_null(vma)))
1164 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1166 u32 pt_level, bool devmem, u64 flags)
1170 /* Avoid passing random bits directly as flags */
1171 xe_assert(xe, !(flags & ~XE_PTE_PS64));
1174 pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1175 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1176 pte |= pte_encode_ps(pt_level);
1179 pte |= XE_PPGTT_PTE_DM;
1186 static const struct xe_pt_ops xelp_pt_ops = {
1187 .pte_encode_bo = xelp_pte_encode_bo,
1188 .pte_encode_vma = xelp_pte_encode_vma,
1189 .pte_encode_addr = xelp_pte_encode_addr,
1190 .pde_encode_bo = xelp_pde_encode_bo,
1193 static void vm_destroy_work_func(struct work_struct *w);
1196 * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1197 * given tile and vm.
1199 * @tile: tile to set up for.
1200 * @vm: vm to set up for.
1202 * Sets up a pagetable tree with one page-table per level and a single
1203 * leaf PTE. All pagetable entries point to the single page-table or,
1204 * for MAX_HUGEPTE_LEVEL, a NULL huge PTE that returns 0 on reads and
1205 * turns writes into NOPs.
1207 * Return: 0 on success, negative error code on error.
1209 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1215 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1216 vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1217 if (IS_ERR(vm->scratch_pt[id][i]))
1218 return PTR_ERR(vm->scratch_pt[id][i]);
1220 xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1226 static void xe_vm_free_scratch(struct xe_vm *vm)
1228 struct xe_tile *tile;
1231 if (!xe_vm_has_scratch(vm))
1234 for_each_tile(tile, vm->xe, id) {
1237 if (!vm->pt_root[id])
1240 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1241 if (vm->scratch_pt[id][i])
1242 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1246 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1248 struct drm_gem_object *vm_resv_obj;
1250 int err, number_tiles = 0;
1251 struct xe_tile *tile;
1254 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1256 return ERR_PTR(-ENOMEM);
1260 vm->size = 1ull << xe->info.va_bits;
1264 init_rwsem(&vm->lock);
1266 INIT_LIST_HEAD(&vm->rebind_list);
1268 INIT_LIST_HEAD(&vm->userptr.repin_list);
1269 INIT_LIST_HEAD(&vm->userptr.invalidated);
1270 init_rwsem(&vm->userptr.notifier_lock);
1271 spin_lock_init(&vm->userptr.invalidated_lock);
1273 INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1275 INIT_LIST_HEAD(&vm->preempt.exec_queues);
1276 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
1278 for_each_tile(tile, xe, id)
1279 xe_range_fence_tree_init(&vm->rftree[id]);
1281 vm->pt_ops = &xelp_pt_ops;
1283 if (!(flags & XE_VM_FLAG_MIGRATION))
1284 xe_device_mem_access_get(xe);
1286 vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1292 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1293 vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1295 drm_gem_object_put(vm_resv_obj);
1297 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1301 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1302 vm->flags |= XE_VM_FLAG_64K;
1304 for_each_tile(tile, xe, id) {
1305 if (flags & XE_VM_FLAG_MIGRATION &&
1306 tile->id != XE_VM_FLAG_TILE_ID(flags))
1309 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1310 if (IS_ERR(vm->pt_root[id])) {
1311 err = PTR_ERR(vm->pt_root[id]);
1312 vm->pt_root[id] = NULL;
1313 goto err_unlock_close;
1317 if (xe_vm_has_scratch(vm)) {
1318 for_each_tile(tile, xe, id) {
1319 if (!vm->pt_root[id])
1322 err = xe_vm_create_scratch(xe, tile, vm);
1324 goto err_unlock_close;
1326 vm->batch_invalidate_tlb = true;
1329 if (flags & XE_VM_FLAG_LR_MODE) {
1330 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1331 vm->flags |= XE_VM_FLAG_LR_MODE;
1332 vm->batch_invalidate_tlb = false;
1335 /* Fill pt_root after allocating scratch tables */
1336 for_each_tile(tile, xe, id) {
1337 if (!vm->pt_root[id])
1340 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1342 dma_resv_unlock(xe_vm_resv(vm));
1344 /* Kernel migration VM shouldn't have a circular loop.. */
1345 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1346 for_each_tile(tile, xe, id) {
1347 struct xe_gt *gt = tile->primary_gt;
1348 struct xe_vm *migrate_vm;
1349 struct xe_exec_queue *q;
1350 u32 create_flags = EXEC_QUEUE_FLAG_VM;
1352 if (!vm->pt_root[id])
1355 migrate_vm = xe_migrate_get_vm(tile->migrate);
1356 q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1357 XE_ENGINE_CLASS_COPY,
1359 xe_vm_put(migrate_vm);
1369 if (number_tiles > 1)
1370 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1372 mutex_lock(&xe->usm.lock);
1373 if (flags & XE_VM_FLAG_FAULT_MODE)
1374 xe->usm.num_vm_in_fault_mode++;
1375 else if (!(flags & XE_VM_FLAG_MIGRATION))
1376 xe->usm.num_vm_in_non_fault_mode++;
1377 mutex_unlock(&xe->usm.lock);
1379 trace_xe_vm_create(vm);
1384 dma_resv_unlock(xe_vm_resv(vm));
1386 xe_vm_close_and_put(vm);
1387 return ERR_PTR(err);
1390 for_each_tile(tile, xe, id)
1391 xe_range_fence_tree_fini(&vm->rftree[id]);
1393 if (!(flags & XE_VM_FLAG_MIGRATION))
1394 xe_device_mem_access_put(xe);
1395 return ERR_PTR(err);
1398 static void xe_vm_close(struct xe_vm *vm)
1400 down_write(&vm->lock);
1402 up_write(&vm->lock);
1405 void xe_vm_close_and_put(struct xe_vm *vm)
1407 LIST_HEAD(contested);
1408 struct xe_device *xe = vm->xe;
1409 struct xe_tile *tile;
1410 struct xe_vma *vma, *next_vma;
1411 struct drm_gpuva *gpuva, *next;
1414 xe_assert(xe, !vm->preempt.num_exec_queues);
1417 if (xe_vm_in_preempt_fence_mode(vm))
1418 flush_work(&vm->preempt.rebind_work);
1420 down_write(&vm->lock);
1421 for_each_tile(tile, xe, id) {
1423 xe_exec_queue_last_fence_put(vm->q[id], vm);
1425 up_write(&vm->lock);
1427 for_each_tile(tile, xe, id) {
1429 xe_exec_queue_kill(vm->q[id]);
1430 xe_exec_queue_put(vm->q[id]);
1435 down_write(&vm->lock);
1436 xe_vm_lock(vm, false);
1437 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1438 vma = gpuva_to_vma(gpuva);
1440 if (xe_vma_has_no_bo(vma)) {
1441 down_read(&vm->userptr.notifier_lock);
1442 vma->gpuva.flags |= XE_VMA_DESTROYED;
1443 up_read(&vm->userptr.notifier_lock);
1446 xe_vm_remove_vma(vm, vma);
1448 /* easy case, remove from VMA? */
1449 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1450 list_del_init(&vma->combined_links.rebind);
1451 xe_vma_destroy(vma, NULL);
1455 list_move_tail(&vma->combined_links.destroy, &contested);
1456 vma->gpuva.flags |= XE_VMA_DESTROYED;
1460 * All vm operations will add shared fences to resv.
1461 * The only exception is eviction for a shared object,
1462 * but even so, the unbind when evicted would still
1463 * install a fence to resv. Hence it's safe to
1464 * destroy the pagetables immediately.
1466 xe_vm_free_scratch(vm);
1468 for_each_tile(tile, xe, id) {
1469 if (vm->pt_root[id]) {
1470 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1471 vm->pt_root[id] = NULL;
1477 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1478 * Since we hold a refcount to the bo, we can remove and free
1479 * the members safely without locking.
1481 list_for_each_entry_safe(vma, next_vma, &contested,
1482 combined_links.destroy) {
1483 list_del_init(&vma->combined_links.destroy);
1484 xe_vma_destroy_unlocked(vma);
1487 up_write(&vm->lock);
1489 mutex_lock(&xe->usm.lock);
1490 if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1491 xe->usm.num_vm_in_fault_mode--;
1492 else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1493 xe->usm.num_vm_in_non_fault_mode--;
1494 mutex_unlock(&xe->usm.lock);
1496 for_each_tile(tile, xe, id)
1497 xe_range_fence_tree_fini(&vm->rftree[id]);
1502 static void vm_destroy_work_func(struct work_struct *w)
1505 container_of(w, struct xe_vm, destroy_work);
1506 struct xe_device *xe = vm->xe;
1507 struct xe_tile *tile;
1511 /* xe_vm_close_and_put was not called? */
1512 xe_assert(xe, !vm->size);
1514 if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1515 xe_device_mem_access_put(xe);
1517 if (xe->info.has_asid && vm->usm.asid) {
1518 mutex_lock(&xe->usm.lock);
1519 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1520 xe_assert(xe, lookup == vm);
1521 mutex_unlock(&xe->usm.lock);
1525 for_each_tile(tile, xe, id)
1526 XE_WARN_ON(vm->pt_root[id]);
1528 trace_xe_vm_free(vm);
1529 dma_fence_put(vm->rebind_fence);
1533 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1535 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1537 /* To destroy the VM we need to be able to sleep */
1538 queue_work(system_unbound_wq, &vm->destroy_work);
1541 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1545 mutex_lock(&xef->vm.lock);
1546 vm = xa_load(&xef->vm.xa, id);
1549 mutex_unlock(&xef->vm.lock);
1554 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1556 return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1557 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1560 static struct xe_exec_queue *
1561 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1563 return q ? q : vm->q[0];
1566 static struct dma_fence *
1567 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1568 struct xe_sync_entry *syncs, u32 num_syncs,
1569 bool first_op, bool last_op)
1571 struct xe_vm *vm = xe_vma_vm(vma);
1572 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1573 struct xe_tile *tile;
1574 struct dma_fence *fence = NULL;
1575 struct dma_fence **fences = NULL;
1576 struct dma_fence_array *cf = NULL;
1577 int cur_fence = 0, i;
1578 int number_tiles = hweight8(vma->tile_present);
1582 trace_xe_vma_unbind(vma);
1584 if (number_tiles > 1) {
1585 fences = kmalloc_array(number_tiles, sizeof(*fences),
1588 return ERR_PTR(-ENOMEM);
1591 for_each_tile(tile, vm->xe, id) {
1592 if (!(vma->tile_present & BIT(id)))
1595 fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1596 first_op ? syncs : NULL,
1597 first_op ? num_syncs : 0);
1598 if (IS_ERR(fence)) {
1599 err = PTR_ERR(fence);
1604 fences[cur_fence++] = fence;
1607 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1608 q = list_next_entry(q, multi_gt_list);
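/*
 * With more than one tile, aggregate the per-tile unbind fences into a
 * single dma_fence_array on the VM's composite fence context.
 */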
1612 cf = dma_fence_array_create(number_tiles, fences,
1613 vm->composite_fence_ctx,
1614 vm->composite_fence_seqno++,
1617 --vm->composite_fence_seqno;
1623 fence = cf ? &cf->base : !fence ?
1624 xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1626 for (i = 0; i < num_syncs; i++)
1627 xe_sync_entry_signal(&syncs[i], NULL, fence);
1635 dma_fence_put(fences[--cur_fence]);
1639 return ERR_PTR(err);
1642 static struct dma_fence *
1643 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1644 struct xe_sync_entry *syncs, u32 num_syncs,
1645 bool first_op, bool last_op)
1647 struct xe_tile *tile;
1648 struct dma_fence *fence;
1649 struct dma_fence **fences = NULL;
1650 struct dma_fence_array *cf = NULL;
1651 struct xe_vm *vm = xe_vma_vm(vma);
1652 int cur_fence = 0, i;
1653 int number_tiles = hweight8(vma->tile_mask);
1657 trace_xe_vma_bind(vma);
1659 if (number_tiles > 1) {
1660 fences = kmalloc_array(number_tiles, sizeof(*fences),
1663 return ERR_PTR(-ENOMEM);
1666 for_each_tile(tile, vm->xe, id) {
1667 if (!(vma->tile_mask & BIT(id)))
1670 fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1671 first_op ? syncs : NULL,
1672 first_op ? num_syncs : 0,
1673 vma->tile_present & BIT(id));
1674 if (IS_ERR(fence)) {
1675 err = PTR_ERR(fence);
1680 fences[cur_fence++] = fence;
1683 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1684 q = list_next_entry(q, multi_gt_list);
1688 cf = dma_fence_array_create(number_tiles, fences,
1689 vm->composite_fence_ctx,
1690 vm->composite_fence_seqno++,
1693 --vm->composite_fence_seqno;
1700 for (i = 0; i < num_syncs; i++)
1701 xe_sync_entry_signal(&syncs[i], NULL,
1702 cf ? &cf->base : fence);
1705 return cf ? &cf->base : fence;
1710 dma_fence_put(fences[--cur_fence]);
1714 return ERR_PTR(err);
1717 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1718 struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1719 u32 num_syncs, bool immediate, bool first_op,
1722 struct dma_fence *fence;
1723 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1725 xe_vm_assert_held(vm);
1728 fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1731 return PTR_ERR(fence);
1735 xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1737 fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1739 for (i = 0; i < num_syncs; i++)
1740 xe_sync_entry_signal(&syncs[i], NULL, fence);
1745 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1746 dma_fence_put(fence);
1751 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1752 struct xe_bo *bo, struct xe_sync_entry *syncs,
1753 u32 num_syncs, bool immediate, bool first_op,
1758 xe_vm_assert_held(vm);
1759 xe_bo_assert_held(bo);
1761 if (bo && immediate) {
1762 err = xe_bo_validate(bo, vm, true);
1767 return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
1771 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1772 struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1773 u32 num_syncs, bool first_op, bool last_op)
1775 struct dma_fence *fence;
1776 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1778 xe_vm_assert_held(vm);
1779 xe_bo_assert_held(xe_vma_bo(vma));
1781 fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1783 return PTR_ERR(fence);
1785 xe_vma_destroy(vma, fence);
1787 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1788 dma_fence_put(fence);
1793 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1794 DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1795 DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1797 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1798 struct drm_file *file)
1800 struct xe_device *xe = to_xe_device(dev);
1801 struct xe_file *xef = to_xe_file(file);
1802 struct drm_xe_vm_create *args = data;
1803 struct xe_tile *tile;
1809 if (XE_IOCTL_DBG(xe, args->extensions))
1812 if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1813 args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1815 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1819 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1822 if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1825 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1826 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1829 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1830 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1833 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1834 xe_device_in_non_fault_mode(xe)))
1837 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1838 xe_device_in_fault_mode(xe)))
1841 if (XE_IOCTL_DBG(xe, args->extensions))
1844 if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1845 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1846 if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1847 flags |= XE_VM_FLAG_LR_MODE;
1848 if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1849 flags |= XE_VM_FLAG_FAULT_MODE;
1851 vm = xe_vm_create(xe, flags);
1855 mutex_lock(&xef->vm.lock);
1856 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1857 mutex_unlock(&xef->vm.lock);
1859 goto err_close_and_put;
1861 if (xe->info.has_asid) {
1862 mutex_lock(&xe->usm.lock);
1863 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1864 XA_LIMIT(1, XE_MAX_ASID - 1),
1865 &xe->usm.next_asid, GFP_KERNEL);
1866 mutex_unlock(&xe->usm.lock);
1870 vm->usm.asid = asid;
1876 /* Record BO memory for VM pagetable created against client */
1877 for_each_tile(tile, xe, id)
1878 if (vm->pt_root[id])
1879 xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1881 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1882 /* Warning: Security issue - never enable by default */
1883 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1889 mutex_lock(&xef->vm.lock);
1890 xa_erase(&xef->vm.xa, id);
1891 mutex_unlock(&xef->vm.lock);
1893 xe_vm_close_and_put(vm);
1898 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1899 struct drm_file *file)
1901 struct xe_device *xe = to_xe_device(dev);
1902 struct xe_file *xef = to_xe_file(file);
1903 struct drm_xe_vm_destroy *args = data;
1907 if (XE_IOCTL_DBG(xe, args->pad) ||
1908 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1911 mutex_lock(&xef->vm.lock);
1912 vm = xa_load(&xef->vm.xa, args->vm_id);
1913 if (XE_IOCTL_DBG(xe, !vm))
1915 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1918 xa_erase(&xef->vm.xa, args->vm_id);
1919 mutex_unlock(&xef->vm.lock);
1922 xe_vm_close_and_put(vm);
1927 static const u32 region_to_mem_type[] = {
1933 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
1934 struct xe_exec_queue *q, u32 region,
1935 struct xe_sync_entry *syncs, u32 num_syncs,
1936 bool first_op, bool last_op)
1938 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1941 xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
1943 if (!xe_vma_has_no_bo(vma)) {
1944 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
1949 if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
1950 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
1951 true, first_op, last_op);
1955 /* Nothing to do, signal fences now */
1957 for (i = 0; i < num_syncs; i++) {
1958 struct dma_fence *fence =
1959 xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1961 xe_sync_entry_signal(&syncs[i], NULL, fence);
1969 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
1972 down_read(&vm->userptr.notifier_lock);
1973 vma->gpuva.flags |= XE_VMA_DESTROYED;
1974 up_read(&vm->userptr.notifier_lock);
1976 xe_vm_remove_vma(vm, vma);
1980 #define ULL unsigned long long
1982 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
1983 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
1988 case DRM_GPUVA_OP_MAP:
1989 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
1990 (ULL)op->map.va.addr, (ULL)op->map.va.range);
1992 case DRM_GPUVA_OP_REMAP:
1993 vma = gpuva_to_vma(op->remap.unmap->va);
1994 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
1995 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
1996 op->remap.unmap->keep ? 1 : 0);
1999 "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2000 (ULL)op->remap.prev->va.addr,
2001 (ULL)op->remap.prev->va.range);
2004 "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2005 (ULL)op->remap.next->va.addr,
2006 (ULL)op->remap.next->va.range);
2008 case DRM_GPUVA_OP_UNMAP:
2009 vma = gpuva_to_vma(op->unmap.va);
2010 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2011 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2012 op->unmap.keep ? 1 : 0);
2014 case DRM_GPUVA_OP_PREFETCH:
2015 vma = gpuva_to_vma(op->prefetch.va);
2016 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2017 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2020 drm_warn(&xe->drm, "NOT POSSIBLE");
2024 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2030 * Create the operations list from IOCTL arguments and set up operation fields
2031 * so the parse and commit steps are decoupled from IOCTL arguments. This step can fail.
2033 static struct drm_gpuva_ops *
2034 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2035 u64 bo_offset_or_userptr, u64 addr, u64 range,
2036 u32 operation, u32 flags,
2037 u32 prefetch_region, u16 pat_index)
2039 struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2040 struct drm_gpuva_ops *ops;
2041 struct drm_gpuva_op *__op;
2042 struct xe_vma_op *op;
2043 struct drm_gpuvm_bo *vm_bo;
2046 lockdep_assert_held_write(&vm->lock);
2048 vm_dbg(&vm->xe->drm,
2049 "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2050 operation, (ULL)addr, (ULL)range,
2051 (ULL)bo_offset_or_userptr);
2053 switch (operation) {
2054 case DRM_XE_VM_BIND_OP_MAP:
2055 case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2056 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2057 obj, bo_offset_or_userptr);
2059 case DRM_XE_VM_BIND_OP_UNMAP:
2060 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2062 case DRM_XE_VM_BIND_OP_PREFETCH:
2063 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2065 case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2066 xe_assert(vm->xe, bo);
2068 err = xe_bo_lock(bo, true);
2070 return ERR_PTR(err);
2072 vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2073 if (IS_ERR(vm_bo)) {
2075 return ERR_CAST(vm_bo);
2078 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2079 drm_gpuvm_bo_put(vm_bo);
2083 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2084 ops = ERR_PTR(-EINVAL);
2089 #ifdef TEST_VM_ASYNC_OPS_ERROR
2090 if (operation & FORCE_ASYNC_OP_ERROR) {
2091 op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
2094 op->inject_error = true;
2098 drm_gpuva_for_each_op(__op, ops) {
2099 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2101 if (__op->op == DRM_GPUVA_OP_MAP) {
2103 flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2105 flags & DRM_XE_VM_BIND_FLAG_READONLY;
2106 op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2107 op->map.pat_index = pat_index;
2108 } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2109 op->prefetch.region = prefetch_region;
2112 print_op(vm->xe, __op);
2118 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2119 u16 pat_index, unsigned int flags)
2121 struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2122 struct drm_exec exec;
2126 lockdep_assert_held_write(&vm->lock);
2129 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2130 drm_exec_until_all_locked(&exec) {
2133 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2134 drm_exec_retry_on_contention(&exec);
2137 err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2138 drm_exec_retry_on_contention(&exec);
2141 drm_exec_fini(&exec);
2142 return ERR_PTR(err);
2146 vma = xe_vma_create(vm, bo, op->gem.offset,
2147 op->va.addr, op->va.addr +
2148 op->va.range - 1, pat_index, flags);
2150 drm_exec_fini(&exec);
2152 if (xe_vma_is_userptr(vma)) {
2153 err = xe_vma_userptr_pin_pages(vma);
2155 prep_vma_destroy(vm, vma, false);
2156 xe_vma_destroy_unlocked(vma);
2157 return ERR_PTR(err);
2159 } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2160 err = add_preempt_fences(vm, bo);
2162 prep_vma_destroy(vm, vma, false);
2163 xe_vma_destroy_unlocked(vma);
2164 return ERR_PTR(err);
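/*
 * Largest page size this VMA may be mapped with, derived from the
 * PTE-size flags recorded at bind time.
 */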
2171 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2173 if (vma->gpuva.flags & XE_VMA_PTE_1G)
2175 else if (vma->gpuva.flags & XE_VMA_PTE_2M)
2181 static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2185 vma->gpuva.flags |= XE_VMA_PTE_1G;
2188 vma->gpuva.flags |= XE_VMA_PTE_2M;
2195 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2199 lockdep_assert_held_write(&vm->lock);
2201 switch (op->base.op) {
2202 case DRM_GPUVA_OP_MAP:
2203 err |= xe_vm_insert_vma(vm, op->map.vma);
2205 op->flags |= XE_VMA_OP_COMMITTED;
2207 case DRM_GPUVA_OP_REMAP:
2210 gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2212 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2214 op->flags |= XE_VMA_OP_COMMITTED;
2216 if (op->remap.prev) {
2217 err |= xe_vm_insert_vma(vm, op->remap.prev);
2219 op->flags |= XE_VMA_OP_PREV_COMMITTED;
2220 if (!err && op->remap.skip_prev) {
2221 op->remap.prev->tile_present =
2223 op->remap.prev = NULL;
2226 if (op->remap.next) {
2227 err |= xe_vm_insert_vma(vm, op->remap.next);
2229 op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2230 if (!err && op->remap.skip_next) {
2231 op->remap.next->tile_present =
2233 op->remap.next = NULL;
2237 /* Adjust for partial unbind after removing the VMA from the VM */
2239 op->base.remap.unmap->va->va.addr = op->remap.start;
2240 op->base.remap.unmap->va->va.range = op->remap.range;
2244 case DRM_GPUVA_OP_UNMAP:
2245 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2246 op->flags |= XE_VMA_OP_COMMITTED;
2248 case DRM_GPUVA_OP_PREFETCH:
2249 op->flags |= XE_VMA_OP_COMMITTED;
2252 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2259 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2260 struct drm_gpuva_ops *ops,
2261 struct xe_sync_entry *syncs, u32 num_syncs,
2262 struct list_head *ops_list, bool last)
2264 struct xe_vma_op *last_op = NULL;
2265 struct drm_gpuva_op *__op;
2268 lockdep_assert_held_write(&vm->lock);
2270 drm_gpuva_for_each_op(__op, ops) {
2271 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2273 bool first = list_empty(ops_list);
2274 unsigned int flags = 0;
2276 INIT_LIST_HEAD(&op->link);
2277 list_add_tail(&op->link, ops_list);
2280 op->flags |= XE_VMA_OP_FIRST;
2281 op->num_syncs = num_syncs;
2287 switch (op->base.op) {
2288 case DRM_GPUVA_OP_MAP:
2290 flags |= op->map.read_only ?
2291 VMA_CREATE_FLAG_READ_ONLY : 0;
2292 flags |= op->map.is_null ?
2293 VMA_CREATE_FLAG_IS_NULL : 0;
2295 vma = new_vma(vm, &op->base.map, op->map.pat_index,
2298 return PTR_ERR(vma);
2303 case DRM_GPUVA_OP_REMAP:
2305 struct xe_vma *old =
2306 gpuva_to_vma(op->base.remap.unmap->va);
2308 op->remap.start = xe_vma_start(old);
2309 op->remap.range = xe_vma_size(old);
2311 if (op->base.remap.prev) {
2312 flags |= op->base.remap.unmap->va->flags &
2314 VMA_CREATE_FLAG_READ_ONLY : 0;
2315 flags |= op->base.remap.unmap->va->flags &
2317 VMA_CREATE_FLAG_IS_NULL : 0;
2319 vma = new_vma(vm, op->base.remap.prev,
2320 old->pat_index, flags);
2322 return PTR_ERR(vma);
2324 op->remap.prev = vma;
2327 * Userptr creates a new SG mapping so
2328 * we must also rebind.
2330 op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2331 IS_ALIGNED(xe_vma_end(vma),
2332 xe_vma_max_pte_size(old));
2333 if (op->remap.skip_prev) {
2334 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2338 op->remap.start = xe_vma_end(vma);
2342 if (op->base.remap.next) {
2343 flags |= op->base.remap.unmap->va->flags &
2345 VMA_CREATE_FLAG_READ_ONLY : 0;
2346 flags |= op->base.remap.unmap->va->flags &
2348 VMA_CREATE_FLAG_IS_NULL : 0;
2350 vma = new_vma(vm, op->base.remap.next,
2351 old->pat_index, flags);
2353 return PTR_ERR(vma);
2355 op->remap.next = vma;
2358 * Userptr creates a new SG mapping so
2359 * we must also rebind.
2361 op->remap.skip_next = !xe_vma_is_userptr(old) &&
2362 IS_ALIGNED(xe_vma_start(vma),
2363 xe_vma_max_pte_size(old));
2364 if (op->remap.skip_next) {
2365 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2373 case DRM_GPUVA_OP_UNMAP:
2374 case DRM_GPUVA_OP_PREFETCH:
2378 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2383 err = xe_vma_op_commit(vm, op);
2388 /* FIXME: Unhandled corner case */
2389 XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2396 last_op->flags |= XE_VMA_OP_LAST;
2397 last_op->num_syncs = num_syncs;
2398 last_op->syncs = syncs;
2404 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2405 struct xe_vma *vma, struct xe_vma_op *op)
2409 lockdep_assert_held_write(&vm->lock);
2411 err = xe_vm_prepare_vma(exec, vma, 1);
2415 xe_vm_assert_held(vm);
2416 xe_bo_assert_held(xe_vma_bo(vma));
2418 switch (op->base.op) {
2419 case DRM_GPUVA_OP_MAP:
2420 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2421 op->syncs, op->num_syncs,
2422 op->map.immediate || !xe_vm_in_fault_mode(vm),
2423 op->flags & XE_VMA_OP_FIRST,
2424 op->flags & XE_VMA_OP_LAST);
2426 case DRM_GPUVA_OP_REMAP:
2428 bool prev = !!op->remap.prev;
2429 bool next = !!op->remap.next;
2431 if (!op->remap.unmap_done) {
2433 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2434 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2436 op->flags & XE_VMA_OP_FIRST,
2437 op->flags & XE_VMA_OP_LAST &&
2441 op->remap.unmap_done = true;
2445 op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2446 err = xe_vm_bind(vm, op->remap.prev, op->q,
2447 xe_vma_bo(op->remap.prev), op->syncs,
2448 op->num_syncs, true, false,
2449 op->flags & XE_VMA_OP_LAST && !next);
2450 op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2453 op->remap.prev = NULL;
2457 op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2458 err = xe_vm_bind(vm, op->remap.next, op->q,
2459 xe_vma_bo(op->remap.next),
2460 op->syncs, op->num_syncs,
2462 op->flags & XE_VMA_OP_LAST);
2463 op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2466 op->remap.next = NULL;
2471 case DRM_GPUVA_OP_UNMAP:
2472 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2473 op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2474 op->flags & XE_VMA_OP_LAST);
2476 case DRM_GPUVA_OP_PREFETCH:
2477 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2478 op->syncs, op->num_syncs,
2479 op->flags & XE_VMA_OP_FIRST,
2480 op->flags & XE_VMA_OP_LAST);
2483 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2487 trace_xe_vma_fail(vma);
2492 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2493 struct xe_vma_op *op)
2495 struct drm_exec exec;
2499 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2500 drm_exec_until_all_locked(&exec) {
2501 err = op_execute(&exec, vm, vma, op);
2502 drm_exec_retry_on_contention(&exec);
2506 drm_exec_fini(&exec);
2508 if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2509 lockdep_assert_held_write(&vm->lock);
2510 err = xe_vma_userptr_pin_pages(vma);
2514 trace_xe_vma_fail(vma);
2520 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2524 lockdep_assert_held_write(&vm->lock);
2526 #ifdef TEST_VM_ASYNC_OPS_ERROR
2527 if (op->inject_error) {
2528 op->inject_error = false;
2533 switch (op->base.op) {
2534 case DRM_GPUVA_OP_MAP:
2535 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2537 case DRM_GPUVA_OP_REMAP:
2541 if (!op->remap.unmap_done)
2542 vma = gpuva_to_vma(op->base.remap.unmap->va);
2543 else if (op->remap.prev)
2544 vma = op->remap.prev;
2546 vma = op->remap.next;
2548 ret = __xe_vma_op_execute(vm, vma, op);
2551 case DRM_GPUVA_OP_UNMAP:
2552 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2555 case DRM_GPUVA_OP_PREFETCH:
2556 ret = __xe_vma_op_execute(vm,
2557 gpuva_to_vma(op->base.prefetch.va),
2561 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2567 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2569 bool last = op->flags & XE_VMA_OP_LAST;
2572 while (op->num_syncs--)
2573 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2576 xe_exec_queue_put(op->q);
2578 if (!list_empty(&op->link))
2579 list_del(&op->link);
2581 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2586 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2587 bool post_commit, bool prev_post_commit,
2588 bool next_post_commit)
2590 lockdep_assert_held_write(&vm->lock);
2592 switch (op->base.op) {
2593 case DRM_GPUVA_OP_MAP:
2595 prep_vma_destroy(vm, op->map.vma, post_commit);
2596 xe_vma_destroy_unlocked(op->map.vma);
2599 case DRM_GPUVA_OP_UNMAP:
2601 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2604 down_read(&vm->userptr.notifier_lock);
2605 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2606 up_read(&vm->userptr.notifier_lock);
2608 xe_vm_insert_vma(vm, vma);
2612 case DRM_GPUVA_OP_REMAP:
2614 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2616 if (op->remap.prev) {
2617 prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2618 xe_vma_destroy_unlocked(op->remap.prev);
2620 if (op->remap.next) {
2621 prep_vma_destroy(vm, op->remap.next, next_post_commit);
2622 xe_vma_destroy_unlocked(op->remap.next);
2625 down_read(&vm->userptr.notifier_lock);
2626 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2627 up_read(&vm->userptr.notifier_lock);
2629 xe_vm_insert_vma(vm, vma);
2633 case DRM_GPUVA_OP_PREFETCH:
2637 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2641 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2642 struct drm_gpuva_ops **ops,
2647 for (i = num_ops_list - 1; i >= 0; --i) {
2648 struct drm_gpuva_ops *__ops = ops[i];
2649 struct drm_gpuva_op *__op;
2654 drm_gpuva_for_each_op_reverse(__op, __ops) {
2655 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2657 xe_vma_op_unwind(vm, op,
2658 op->flags & XE_VMA_OP_COMMITTED,
2659 op->flags & XE_VMA_OP_PREV_COMMITTED,
2660 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2663 drm_gpuva_ops_free(&vm->gpuvm, __ops);
2667 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2668 struct list_head *ops_list)
2670 struct xe_vma_op *op, *next;
2673 lockdep_assert_held_write(&vm->lock);
2675 list_for_each_entry_safe(op, next, ops_list, link) {
2676 err = xe_vma_op_execute(vm, op);
2678 drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2681 * FIXME: Killing VM rather than proper error handling
2686 xe_vma_op_cleanup(vm, op);
2692 #ifdef TEST_VM_ASYNC_OPS_ERROR
2693 #define SUPPORTED_FLAGS \
2694 (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \
2695 DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
2697 #define SUPPORTED_FLAGS \
2698 (DRM_XE_VM_BIND_FLAG_READONLY | \
2699 DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
2702 #define XE_64K_PAGE_MASK 0xffffull
2703 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
2705 #define MAX_BINDS 512 /* FIXME: Picking random upper limit */
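/*
 * Validate the bind ioctl arguments before any locks are taken: pad and
 * reserved fields must be zero, the number of binds is capped, and every
 * bind op's op code, flags, ranges and pat_index are sanity-checked. On
 * success *bind_ops points either at the inline bind embedded in @args or at
 * a kernel copy of the user-supplied array, which the caller frees when
 * args->num_binds > 1.
 */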
2707 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2708 struct drm_xe_vm_bind *args,
2709 struct drm_xe_vm_bind_op **bind_ops)
2714 if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2715 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2718 if (XE_IOCTL_DBG(xe, args->extensions) ||
2719 XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
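/*
 * A single bind op is passed inline in the ioctl struct; more than one
 * arrives as a user pointer to an array of bind ops that is copied in below.
 */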
2722 if (args->num_binds > 1) {
2723 u64 __user *bind_user =
2724 u64_to_user_ptr(args->vector_of_binds);
2726 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2727 args->num_binds, GFP_KERNEL);
2731 err = __copy_from_user(*bind_ops, bind_user,
2732 sizeof(struct drm_xe_vm_bind_op) *
2734 if (XE_IOCTL_DBG(xe, err)) {
2739 *bind_ops = &args->bind;
2742 for (i = 0; i < args->num_binds; ++i) {
2743 u64 range = (*bind_ops)[i].range;
2744 u64 addr = (*bind_ops)[i].addr;
2745 u32 op = (*bind_ops)[i].op;
2746 u32 flags = (*bind_ops)[i].flags;
2747 u32 obj = (*bind_ops)[i].obj;
2748 u64 obj_offset = (*bind_ops)[i].obj_offset;
2749 u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2750 bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2751 u16 pat_index = (*bind_ops)[i].pat_index;
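/*
 * pat_index comes straight from user space: bound-check it against the
 * device's PAT table and clamp speculation with array_index_nospec() before
 * deriving the coherency mode (a coh_mode of 0 marks a hw-reserved entry).
 */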
2754 if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2759 pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2760 (*bind_ops)[i].pat_index = pat_index;
2761 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2762 if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2767 if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2772 if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2773 XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2774 XE_IOCTL_DBG(xe, obj && is_null) ||
2775 XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2776 XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2778 XE_IOCTL_DBG(xe, !obj &&
2779 op == DRM_XE_VM_BIND_OP_MAP &&
2781 XE_IOCTL_DBG(xe, !obj &&
2782 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2783 XE_IOCTL_DBG(xe, addr &&
2784 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2785 XE_IOCTL_DBG(xe, range &&
2786 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2787 XE_IOCTL_DBG(xe, obj &&
2788 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2789 XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2790 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2791 XE_IOCTL_DBG(xe, obj &&
2792 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2793 XE_IOCTL_DBG(xe, prefetch_region &&
2794 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2795 XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2796 xe->info.mem_region_mask)) ||
2797 XE_IOCTL_DBG(xe, obj &&
2798 op == DRM_XE_VM_BIND_OP_UNMAP)) {
2803 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2804 XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2805 XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2806 XE_IOCTL_DBG(xe, !range &&
2807 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
2816 if (args->num_binds > 1)
2821 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
2822 struct xe_exec_queue *q,
2823 struct xe_sync_entry *syncs,
2826 struct dma_fence *fence;
2829 fence = xe_sync_in_fence_get(syncs, num_syncs,
2830 to_wait_exec_queue(vm, q), vm);
2832 return PTR_ERR(fence);
2834 for (i = 0; i < num_syncs; i++)
2835 xe_sync_entry_signal(&syncs[i], NULL, fence);
2837 xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
2839 dma_fence_put(fence);
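/*
 * VM_BIND ioctl entry point: validate the arguments, look up the optional
 * exec queue and the VM, validate every BO against its bind op, parse the
 * sync entries, translate each bind into GPUVA ops on a single ops_list and
 * execute that list. On failure the already-created ops are unwound; if no
 * ops were generated at all (-ENODATA), only the fences are signalled.
 */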
2844 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2846 struct xe_device *xe = to_xe_device(dev);
2847 struct xe_file *xef = to_xe_file(file);
2848 struct drm_xe_vm_bind *args = data;
2849 struct drm_xe_sync __user *syncs_user;
2850 struct xe_bo **bos = NULL;
2851 struct drm_gpuva_ops **ops = NULL;
2853 struct xe_exec_queue *q = NULL;
2855 struct xe_sync_entry *syncs = NULL;
2856 struct drm_xe_vm_bind_op *bind_ops;
2857 LIST_HEAD(ops_list);
2861 err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2865 if (args->exec_queue_id) {
2866 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2867 if (XE_IOCTL_DBG(xe, !q)) {
2872 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2874 goto put_exec_queue;
2878 vm = xe_vm_lookup(xef, args->vm_id);
2879 if (XE_IOCTL_DBG(xe, !vm)) {
2881 goto put_exec_queue;
2884 err = down_write_killable(&vm->lock);
2888 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2890 goto release_vm_lock;
2893 for (i = 0; i < args->num_binds; ++i) {
2894 u64 range = bind_ops[i].range;
2895 u64 addr = bind_ops[i].addr;
2897 if (XE_IOCTL_DBG(xe, range > vm->size) ||
2898 XE_IOCTL_DBG(xe, addr > vm->size - range)) {
2900 goto release_vm_lock;
2904 if (args->num_binds) {
2905 bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
2908 goto release_vm_lock;
2911 ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
2914 goto release_vm_lock;
2918 for (i = 0; i < args->num_binds; ++i) {
2919 struct drm_gem_object *gem_obj;
2920 u64 range = bind_ops[i].range;
2921 u64 addr = bind_ops[i].addr;
2922 u32 obj = bind_ops[i].obj;
2923 u64 obj_offset = bind_ops[i].obj_offset;
2924 u16 pat_index = bind_ops[i].pat_index;
2930 gem_obj = drm_gem_object_lookup(file, obj);
2931 if (XE_IOCTL_DBG(xe, !gem_obj)) {
2935 bos[i] = gem_to_xe_bo(gem_obj);
2937 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
2938 XE_IOCTL_DBG(xe, obj_offset >
2939 bos[i]->size - range)) {
2944 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
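/*
 * BOs created with 64K backing must be bound at 64K granularity, so the BO
 * offset, GPU address and range all need 64K alignment (presumably matching
 * devices that use 64K device-memory pages).
 */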
2945 if (XE_IOCTL_DBG(xe, obj_offset &
2946 XE_64K_PAGE_MASK) ||
2947 XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
2948 XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
2954 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
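/*
 * A BO with known CPU caching must not pair write-back CPU caching with a
 * non-coherent pat_index; BOs without a cpu_caching value (e.g. imported
 * dma-bufs, see below) must use at least 1-way coherency.
 */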
2955 if (bos[i]->cpu_caching) {
2956 if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2957 bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
2961 } else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
2963 * An imported dma-buf from a different device should
2964 * require 1-way or 2-way coherency since we don't know
2965 * how it was mapped on the CPU. Just assume it is
2966 * potentially cached on the CPU side.
2973 if (args->num_syncs) {
2974 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
2981 syncs_user = u64_to_user_ptr(args->syncs);
2982 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
2983 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
2984 &syncs_user[num_syncs],
2985 (xe_vm_in_lr_mode(vm) ?
2986 SYNC_PARSE_FLAG_LR_MODE : 0) |
2988 SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
2993 if (!args->num_binds) {
2998 for (i = 0; i < args->num_binds; ++i) {
2999 u64 range = bind_ops[i].range;
3000 u64 addr = bind_ops[i].addr;
3001 u32 op = bind_ops[i].op;
3002 u32 flags = bind_ops[i].flags;
3003 u64 obj_offset = bind_ops[i].obj_offset;
3004 u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3005 u16 pat_index = bind_ops[i].pat_index;
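/*
 * Turn each validated bind op into a chain of GPUVA ops and parse it onto
 * the shared ops_list right away; the final bind of the ioctl is flagged as
 * the last op.
 */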
3007 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3008 addr, range, op, flags,
3009 prefetch_region, pat_index);
3010 if (IS_ERR(ops[i])) {
3011 err = PTR_ERR(ops[i]);
3016 err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3018 i == args->num_binds - 1);
3024 if (list_empty(&ops_list)) {
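/*
 * No binds were generated; only the in/out fences need to be signalled,
 * which the -ENODATA path below takes care of.
 */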
3031 xe_exec_queue_get(q);
3033 err = vm_bind_ioctl_ops_execute(vm, &ops_list);
3035 up_write(&vm->lock);
3038 xe_exec_queue_put(q);
3041 for (i = 0; bos && i < args->num_binds; ++i)
3046 if (args->num_binds > 1)
3052 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3054 if (err == -ENODATA)
3055 err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3057 xe_sync_entry_cleanup(&syncs[num_syncs]);
3061 for (i = 0; i < args->num_binds; ++i)
3064 up_write(&vm->lock);
3069 xe_exec_queue_put(q);
3073 if (args->num_binds > 1)
3079 * xe_vm_lock() - Lock the vm's dma_resv object
3080 * @vm: The struct xe_vm whose lock is to be locked
3081 * @intr: Whether any wait for a contended lock should be interruptible
3083 * Return: 0 on success, -EINTR if @intr is true and the wait for a
3084 * contended lock was interrupted. If @intr is false, the function always returns 0.
3087 int xe_vm_lock(struct xe_vm *vm, bool intr)
3090 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3092 return dma_resv_lock(xe_vm_resv(vm), NULL);
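/*
 * Typical usage, shown as a minimal sketch:
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... touch state protected by the VM's dma_resv ...
 *	xe_vm_unlock(vm);
 */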
3096 * xe_vm_unlock() - Unlock the vm's dma_resv object
3097 * @vm: The struct xe_vm whose lock is to be released.
3099 * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3101 void xe_vm_unlock(struct xe_vm *vm)
3103 dma_resv_unlock(xe_vm_resv(vm));
3107 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3108 * @vma: VMA to invalidate
3110 * Walks the list of page table leaves owned by this VMA, zeroes their entries,
3111 * invalidates the TLBs, and blocks until the TLB invalidation has completed.
3114 * Returns 0 for success, negative error code otherwise.
3116 int xe_vm_invalidate_vma(struct xe_vma *vma)
3118 struct xe_device *xe = xe_vma_vm(vma)->xe;
3119 struct xe_tile *tile;
3120 u32 tile_needs_invalidate = 0;
3121 int seqno[XE_MAX_TILES_PER_DEVICE];
3125 xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
3126 xe_assert(xe, !xe_vma_is_null(vma));
3127 trace_xe_vma_usm_invalidate(vma);
3129 /* Check that we don't race with page-table updates */
3130 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3131 if (xe_vma_is_userptr(vma)) {
3132 WARN_ON_ONCE(!mmu_interval_check_retry
3133 (&vma->userptr.notifier,
3134 vma->userptr.notifier_seq));
3135 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3136 DMA_RESV_USAGE_BOOKKEEP));
3139 xe_bo_assert_held(xe_vma_bo(vma));
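/*
 * For each tile, zero the PTEs owned by this VMA; if anything was actually
 * zapped, issue a TLB invalidation on the tile's primary GT and remember the
 * resulting seqno.
 */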
3143 for_each_tile(tile, xe, id) {
3144 if (xe_pt_zap_ptes(tile, vma)) {
3145 tile_needs_invalidate |= BIT(id);
3148 * FIXME: We potentially need to invalidate multiple
3149 * GTs within the tile
3151 seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
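/*
 * Invalidations were issued on all tiles first; only now wait on each seqno
 * so the per-tile flushes can proceed in parallel.
 */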
3157 for_each_tile(tile, xe, id) {
3158 if (tile_needs_invalidate & BIT(id)) {
3159 ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3165 vma->usm.tile_invalidated = vma->tile_mask;
3170 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3172 struct drm_gpuva *gpuva;
3176 if (!down_read_trylock(&vm->lock)) {
3177 drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3180 if (vm->pt_root[gt_id]) {
3181 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3182 is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3183 drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3184 is_vram ? "VRAM" : "SYS");
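/*
 * Dump every VMA in the VM: NULL bindings have no backing address, userptr
 * VMAs report the first DMA address of their sg table (if currently pinned),
 * and BO-backed VMAs report the BO address and its placement (VRAM or system
 * memory).
 */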
3187 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3188 struct xe_vma *vma = gpuva_to_vma(gpuva);
3189 bool is_userptr = xe_vma_is_userptr(vma);
3190 bool is_null = xe_vma_is_null(vma);
3194 } else if (is_userptr) {
3195 struct xe_res_cursor cur;
3197 if (vma->userptr.sg) {
3198 xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3200 addr = xe_res_dma(&cur);
3205 addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3206 is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3208 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3209 xe_vma_start(vma), xe_vma_end(vma) - 1,
3211 addr, is_null ? "NULL" : is_userptr ? "USR" :
3212 is_vram ? "VRAM" : "SYS");