1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
20 #include <linux/swap.h>
22 #include <generated/xe_wa_oob.h>
24 #include "xe_assert.h"
26 #include "xe_device.h"
27 #include "xe_drm_client.h"
28 #include "xe_exec_queue.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_migrate.h"
35 #include "xe_preempt_fence.h"
37 #include "xe_res_cursor.h"
42 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
44 return vm->gpuvm.r_obj;
48 * xe_vma_userptr_check_repin() - Advisory check for repin needed
49 * @uvma: The userptr vma
51 * Check if the userptr vma has been invalidated since last successful
52 * repin. The check is advisory only and the function can be called
53 * without the vm->userptr.notifier_lock held. There is no guarantee that the
54 * vma userptr will remain valid after a lockless check, so typically
55 * the call needs to be followed by a proper check under the notifier_lock.
57 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
59 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
61 return mmu_interval_check_retry(&uvma->userptr.notifier,
62 uvma->userptr.notifier_seq) ?
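/*
 * Illustrative sketch only (not part of the driver): as the kernel-doc above
 * notes, the advisory lockless check is typically paired with a repin and a
 * re-check under the notifier lock, roughly:
 *
 *	if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
 *		err = xe_vma_userptr_pin_pages(uvma);
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm)) {
 *		up_read(&vm->userptr.notifier_lock);
 *		... drop locks and retry the repin ...
 *	}
 */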
66 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
68 struct xe_userptr *userptr = &uvma->userptr;
69 struct xe_vma *vma = &uvma->vma;
70 struct xe_vm *vm = xe_vma_vm(vma);
71 struct xe_device *xe = vm->xe;
72 const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
74 bool in_kthread = !current->mm;
75 unsigned long notifier_seq;
77 bool read_only = xe_vma_read_only(vma);
79 lockdep_assert_held(&vm->lock);
80 xe_assert(xe, xe_vma_is_userptr(vma));
82 if (vma->gpuva.flags & XE_VMA_DESTROYED)
85 notifier_seq = mmu_interval_read_begin(&userptr->notifier);
86 if (notifier_seq == userptr->notifier_seq)
89 pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
94 dma_unmap_sgtable(xe->drm.dev,
96 read_only ? DMA_TO_DEVICE :
97 DMA_BIDIRECTIONAL, 0);
98 sg_free_table(userptr->sg);
104 if (!mmget_not_zero(userptr->notifier.mm)) {
108 kthread_use_mm(userptr->notifier.mm);
111 while (pinned < num_pages) {
112 ret = get_user_pages_fast(xe_vma_userptr(vma) +
115 read_only ? 0 : FOLL_WRITE,
125 kthread_unuse_mm(userptr->notifier.mm);
126 mmput(userptr->notifier.mm);
132 ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
134 (u64)pinned << PAGE_SHIFT,
135 xe_sg_segment_size(xe->drm.dev),
141 userptr->sg = &userptr->sgt;
143 ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
144 read_only ? DMA_TO_DEVICE :
146 DMA_ATTR_SKIP_CPU_SYNC |
147 DMA_ATTR_NO_KERNEL_MAPPING);
149 sg_free_table(userptr->sg);
154 for (i = 0; i < pinned; ++i) {
157 set_page_dirty(pages[i]);
158 unlock_page(pages[i]);
161 mark_page_accessed(pages[i]);
165 release_pages(pages, pinned);
169 userptr->notifier_seq = notifier_seq;
170 if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
174 return ret < 0 ? ret : 0;
177 static bool preempt_fences_waiting(struct xe_vm *vm)
179 struct xe_exec_queue *q;
181 lockdep_assert_held(&vm->lock);
182 xe_vm_assert_held(vm);
184 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
185 if (!q->compute.pfence ||
186 (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
187 &q->compute.pfence->flags))) {
195 static void free_preempt_fences(struct list_head *list)
197 struct list_head *link, *next;
199 list_for_each_safe(link, next, list)
200 xe_preempt_fence_free(to_preempt_fence_from_link(link));
203 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
206 lockdep_assert_held(&vm->lock);
207 xe_vm_assert_held(vm);
209 if (*count >= vm->preempt.num_exec_queues)
212 for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
213 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
216 return PTR_ERR(pfence);
218 list_move_tail(xe_preempt_fence_link(pfence), list);
224 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
226 struct xe_exec_queue *q;
228 xe_vm_assert_held(vm);
230 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
231 if (q->compute.pfence) {
232 long timeout = dma_fence_wait(q->compute.pfence, false);
236 dma_fence_put(q->compute.pfence);
237 q->compute.pfence = NULL;
244 static bool xe_vm_is_idle(struct xe_vm *vm)
246 struct xe_exec_queue *q;
248 xe_vm_assert_held(vm);
249 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
250 if (!xe_exec_queue_is_idle(q))
257 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
259 struct list_head *link;
260 struct xe_exec_queue *q;
262 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
263 struct dma_fence *fence;
266 xe_assert(vm->xe, link != list);
268 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
269 q, q->compute.context,
271 dma_fence_put(q->compute.pfence);
272 q->compute.pfence = fence;
276 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
278 struct xe_exec_queue *q;
281 if (!vm->preempt.num_exec_queues)
284 err = xe_bo_lock(bo, true);
288 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
292 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
293 if (q->compute.pfence) {
294 dma_resv_add_fence(bo->ttm.base.resv,
296 DMA_RESV_USAGE_BOOKKEEP);
304 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
305 struct drm_exec *exec)
307 struct xe_exec_queue *q;
309 lockdep_assert_held(&vm->lock);
310 xe_vm_assert_held(vm);
312 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
315 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
316 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
320 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
322 struct drm_gpuvm_exec vm_exec = {
324 .flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
327 struct drm_exec *exec = &vm_exec.exec;
328 struct dma_fence *pfence;
332 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
334 down_write(&vm->lock);
335 err = drm_gpuvm_exec_lock(&vm_exec);
339 pfence = xe_preempt_fence_create(q, q->compute.context,
346 list_add(&q->compute.link, &vm->preempt.exec_queues);
347 ++vm->preempt.num_exec_queues;
348 q->compute.pfence = pfence;
350 down_read(&vm->userptr.notifier_lock);
352 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
353 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
356 * Check to see if a preemption on the VM or a userptr invalidation is in
357 * flight; if so, trigger this preempt fence to sync state with the other
358 * preempt fences on the VM.
360 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
362 dma_fence_enable_sw_signaling(pfence);
364 up_read(&vm->userptr.notifier_lock);
375 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
379 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
381 if (!xe_vm_in_preempt_fence_mode(vm))
384 down_write(&vm->lock);
385 list_del(&q->compute.link);
386 --vm->preempt.num_exec_queues;
387 if (q->compute.pfence) {
388 dma_fence_enable_sw_signaling(q->compute.pfence);
389 dma_fence_put(q->compute.pfence);
390 q->compute.pfence = NULL;
396 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
397 * that need repinning.
400 * This function checks for whether the VM has userptrs that need repinning,
401 * and provides a release-type barrier on the userptr.notifier_lock after
404 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
406 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
408 lockdep_assert_held_read(&vm->userptr.notifier_lock);
410 return (list_empty(&vm->userptr.repin_list) &&
411 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
414 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
416 static void xe_vm_kill(struct xe_vm *vm)
418 struct xe_exec_queue *q;
420 lockdep_assert_held(&vm->lock);
422 xe_vm_lock(vm, false);
423 vm->flags |= XE_VM_FLAG_BANNED;
424 trace_xe_vm_kill(vm);
426 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
430 /* TODO: Inform user the VM is banned */
434 * xe_vm_validate_should_retry() - Whether to retry after a validate error.
435 * @exec: The drm_exec object used for locking before validation.
436 * @err: The error returned from ttm_bo_validate().
437 * @end: A ktime_t cookie that should be set to 0 before first use and
438 * that should be reused on subsequent calls.
440 * With multiple active VMs, under memory pressure, it is possible that
441 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
442 * Until ttm properly handles locking in such scenarios, the best thing the
443 * driver can do is retry with a timeout. Check if that is necessary, and
444 * if so unlock the drm_exec's objects while keeping the ticket to prepare
447 * Return: true if a retry after drm_exec_init() is recommended;
450 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
458 *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
459 if (!ktime_before(cur, *end))
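/*
 * Illustrative usage sketch (not part of the driver), mirroring the retry
 * pattern used by the rebind worker below:
 *
 *	ktime_t end = 0;
 *	int err;
 *
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		err = ...lock and validate the objects...;
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	drm_exec_fini(&exec);
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;
 */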
466 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
468 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
469 struct drm_gpuva *gpuva;
472 lockdep_assert_held(&vm->lock);
473 drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
474 list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
477 ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
481 vm_bo->evicted = false;
485 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
491 * 1 fence for each preempt fence plus a fence for each tile from a
494 err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
495 vm->xe->info.tile_count);
499 if (xe_vm_is_idle(vm)) {
500 vm->preempt.rebind_deactivated = true;
505 if (!preempt_fences_waiting(vm)) {
510 err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
514 err = wait_for_existing_preempt_fences(vm);
518 return drm_gpuvm_validate(&vm->gpuvm, exec);
521 static void preempt_rebind_work_func(struct work_struct *w)
523 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
524 struct drm_exec exec;
525 unsigned int fence_count = 0;
526 LIST_HEAD(preempt_fences);
530 int __maybe_unused tries = 0;
532 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
533 trace_xe_vm_rebind_worker_enter(vm);
535 down_write(&vm->lock);
537 if (xe_vm_is_closed_or_banned(vm)) {
539 trace_xe_vm_rebind_worker_exit(vm);
544 if (xe_vm_userptr_check_repin(vm)) {
545 err = xe_vm_userptr_pin(vm);
547 goto out_unlock_outer;
550 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
552 drm_exec_until_all_locked(&exec) {
555 err = xe_preempt_work_begin(&exec, vm, &done);
556 drm_exec_retry_on_contention(&exec);
558 drm_exec_fini(&exec);
559 if (err && xe_vm_validate_should_retry(&exec, err, &end))
562 goto out_unlock_outer;
566 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
570 err = xe_vm_rebind(vm, true);
574 /* Wait on rebinds and munmap style VM unbinds */
575 wait = dma_resv_wait_timeout(xe_vm_resv(vm),
576 DMA_RESV_USAGE_KERNEL,
577 false, MAX_SCHEDULE_TIMEOUT);
583 #define retry_required(__tries, __vm) \
584 (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
585 (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
586 __xe_vm_userptr_needs_repin(__vm))
588 down_read(&vm->userptr.notifier_lock);
589 if (retry_required(tries, vm)) {
590 up_read(&vm->userptr.notifier_lock);
595 #undef retry_required
597 spin_lock(&vm->xe->ttm.lru_lock);
598 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
599 spin_unlock(&vm->xe->ttm.lru_lock);
601 /* Point of no return. */
602 arm_preempt_fences(vm, &preempt_fences);
603 resume_and_reinstall_preempt_fences(vm, &exec);
604 up_read(&vm->userptr.notifier_lock);
607 drm_exec_fini(&exec);
609 if (err == -EAGAIN) {
610 trace_xe_vm_rebind_worker_retry(vm);
615 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
620 free_preempt_fences(&preempt_fences);
622 trace_xe_vm_rebind_worker_exit(vm);
625 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
626 const struct mmu_notifier_range *range,
627 unsigned long cur_seq)
629 struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
630 struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
631 struct xe_vma *vma = &uvma->vma;
632 struct xe_vm *vm = xe_vma_vm(vma);
633 struct dma_resv_iter cursor;
634 struct dma_fence *fence;
637 xe_assert(vm->xe, xe_vma_is_userptr(vma));
638 trace_xe_vma_userptr_invalidate(vma);
640 if (!mmu_notifier_range_blockable(range))
643 down_write(&vm->userptr.notifier_lock);
644 mmu_interval_set_seq(mni, cur_seq);
646 /* No need to stop gpu access if the userptr is not yet bound. */
647 if (!userptr->initial_bind) {
648 up_write(&vm->userptr.notifier_lock);
653 * Tell exec and rebind worker they need to repin and rebind this
656 if (!xe_vm_in_fault_mode(vm) &&
657 !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
658 spin_lock(&vm->userptr.invalidated_lock);
659 list_move_tail(&userptr->invalidate_link,
660 &vm->userptr.invalidated);
661 spin_unlock(&vm->userptr.invalidated_lock);
664 up_write(&vm->userptr.notifier_lock);
667 * Preempt fences turn into schedule disables, pipeline these.
668 * Note that even in fault mode, we need to wait for binds and
669 * unbinds to complete, and those are attached as BOOKKEEP fences
672 dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
673 DMA_RESV_USAGE_BOOKKEEP);
674 dma_resv_for_each_fence_unlocked(&cursor, fence)
675 dma_fence_enable_sw_signaling(fence);
676 dma_resv_iter_end(&cursor);
678 err = dma_resv_wait_timeout(xe_vm_resv(vm),
679 DMA_RESV_USAGE_BOOKKEEP,
680 false, MAX_SCHEDULE_TIMEOUT);
681 XE_WARN_ON(err <= 0);
683 if (xe_vm_in_fault_mode(vm)) {
684 err = xe_vm_invalidate_vma(vma);
688 trace_xe_vma_userptr_invalidate_complete(vma);
693 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
694 .invalidate = vma_userptr_invalidate,
697 int xe_vm_userptr_pin(struct xe_vm *vm)
699 struct xe_userptr_vma *uvma, *next;
701 LIST_HEAD(tmp_evict);
703 xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
704 lockdep_assert_held_write(&vm->lock);
706 /* Collect invalidated userptrs */
707 spin_lock(&vm->userptr.invalidated_lock);
708 list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
709 userptr.invalidate_link) {
710 list_del_init(&uvma->userptr.invalidate_link);
711 list_move_tail(&uvma->userptr.repin_link,
712 &vm->userptr.repin_list);
714 spin_unlock(&vm->userptr.invalidated_lock);
716 /* Pin and move to temporary list */
717 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
718 userptr.repin_link) {
719 err = xe_vma_userptr_pin_pages(uvma);
720 if (err == -EFAULT) {
721 list_del_init(&uvma->userptr.repin_link);
723 /* Wait for pending binds */
724 xe_vm_lock(vm, false);
725 dma_resv_wait_timeout(xe_vm_resv(vm),
726 DMA_RESV_USAGE_BOOKKEEP,
727 false, MAX_SCHEDULE_TIMEOUT);
729 err = xe_vm_invalidate_vma(&uvma->vma);
737 list_del_init(&uvma->userptr.repin_link);
738 list_move_tail(&uvma->vma.combined_links.rebind,
747 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
748 * that need repinning.
751 * This function does an advisory check for whether the VM has userptrs that
754 * Return: 0 if there are no indications of userptrs needing repinning,
755 * -EAGAIN if there are.
757 int xe_vm_userptr_check_repin(struct xe_vm *vm)
759 return (list_empty_careful(&vm->userptr.repin_list) &&
760 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
763 static struct dma_fence *
764 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
765 struct xe_sync_entry *syncs, u32 num_syncs,
766 bool first_op, bool last_op);
768 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
770 struct dma_fence *fence;
771 struct xe_vma *vma, *next;
773 lockdep_assert_held(&vm->lock);
774 if (xe_vm_in_lr_mode(vm) && !rebind_worker)
777 xe_vm_assert_held(vm);
778 list_for_each_entry_safe(vma, next, &vm->rebind_list,
779 combined_links.rebind) {
780 xe_assert(vm->xe, vma->tile_present);
782 list_del_init(&vma->combined_links.rebind);
784 trace_xe_vma_rebind_worker(vma);
786 trace_xe_vma_rebind_exec(vma);
787 fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
789 return PTR_ERR(fence);
790 dma_fence_put(fence);
796 static void xe_vma_free(struct xe_vma *vma)
798 if (xe_vma_is_userptr(vma))
799 kfree(to_userptr_vma(vma));
804 #define VMA_CREATE_FLAG_READ_ONLY BIT(0)
805 #define VMA_CREATE_FLAG_IS_NULL BIT(1)
806 #define VMA_CREATE_FLAG_DUMPABLE BIT(2)
808 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
810 u64 bo_offset_or_userptr,
812 u16 pat_index, unsigned int flags)
815 struct xe_tile *tile;
817 bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
818 bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
819 bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
821 xe_assert(vm->xe, start < end);
822 xe_assert(vm->xe, end < vm->size);
825 * Allocate and ensure that the xe_vma_is_userptr() return
826 * matches what was allocated.
828 if (!bo && !is_null) {
829 struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
832 return ERR_PTR(-ENOMEM);
836 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
838 return ERR_PTR(-ENOMEM);
841 vma->gpuva.flags |= DRM_GPUVA_SPARSE;
843 vma->gpuva.gem.obj = &bo->ttm.base;
846 INIT_LIST_HEAD(&vma->combined_links.rebind);
848 INIT_LIST_HEAD(&vma->gpuva.gem.entry);
849 vma->gpuva.vm = &vm->gpuvm;
850 vma->gpuva.va.addr = start;
851 vma->gpuva.va.range = end - start + 1;
853 vma->gpuva.flags |= XE_VMA_READ_ONLY;
855 vma->gpuva.flags |= XE_VMA_DUMPABLE;
857 for_each_tile(tile, vm->xe, id)
858 vma->tile_mask |= 0x1 << id;
860 if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
861 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
863 vma->pat_index = pat_index;
866 struct drm_gpuvm_bo *vm_bo;
868 xe_bo_assert_held(bo);
870 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
873 return ERR_CAST(vm_bo);
876 drm_gpuvm_bo_extobj_add(vm_bo);
877 drm_gem_object_get(&bo->ttm.base);
878 vma->gpuva.gem.offset = bo_offset_or_userptr;
879 drm_gpuva_link(&vma->gpuva, vm_bo);
880 drm_gpuvm_bo_put(vm_bo);
881 } else /* userptr or null */ {
883 struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
884 u64 size = end - start + 1;
887 INIT_LIST_HEAD(&userptr->invalidate_link);
888 INIT_LIST_HEAD(&userptr->repin_link);
889 vma->gpuva.gem.offset = bo_offset_or_userptr;
891 err = mmu_interval_notifier_insert(&userptr->notifier,
893 xe_vma_userptr(vma), size,
894 &vma_userptr_notifier_ops);
900 userptr->notifier_seq = LONG_MAX;
909 static void xe_vma_destroy_late(struct xe_vma *vma)
911 struct xe_vm *vm = xe_vma_vm(vma);
912 struct xe_device *xe = vm->xe;
913 bool read_only = xe_vma_read_only(vma);
916 xe_sync_ufence_put(vma->ufence);
920 if (xe_vma_is_userptr(vma)) {
921 struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
924 dma_unmap_sgtable(xe->drm.dev,
926 read_only ? DMA_TO_DEVICE :
927 DMA_BIDIRECTIONAL, 0);
928 sg_free_table(userptr->sg);
933 * Since userptr pages are not pinned, we can't remove
934 * the notifier until we're sure the GPU is not accessing
937 mmu_interval_notifier_remove(&userptr->notifier);
939 } else if (xe_vma_is_null(vma)) {
942 xe_bo_put(xe_vma_bo(vma));
948 static void vma_destroy_work_func(struct work_struct *w)
951 container_of(w, struct xe_vma, destroy_work);
953 xe_vma_destroy_late(vma);
956 static void vma_destroy_cb(struct dma_fence *fence,
957 struct dma_fence_cb *cb)
959 struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
961 INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
962 queue_work(system_unbound_wq, &vma->destroy_work);
965 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
967 struct xe_vm *vm = xe_vma_vm(vma);
969 lockdep_assert_held_write(&vm->lock);
970 xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
972 if (xe_vma_is_userptr(vma)) {
973 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
975 spin_lock(&vm->userptr.invalidated_lock);
976 list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
977 spin_unlock(&vm->userptr.invalidated_lock);
978 } else if (!xe_vma_is_null(vma)) {
979 xe_bo_assert_held(xe_vma_bo(vma));
981 drm_gpuva_unlink(&vma->gpuva);
984 xe_vm_assert_held(vm);
986 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
990 XE_WARN_ON(ret != -ENOENT);
991 xe_vma_destroy_late(vma);
994 xe_vma_destroy_late(vma);
999 * xe_vm_prepare_vma() - drm_exec utility to lock a vma
1000 * @exec: The drm_exec object we're currently locking for.
1001 * @vma: The vma for which we want to lock the vm resv and any attached
1003 * @num_shared: The number of dma-fence slots to pre-allocate in the
1004 * objects' reservation objects.
1006 * Return: 0 on success, negative error code on error. In particular
1007 * may return -EDEADLK on WW transaction contention and -EINTR if
1008 * an interruptible wait is terminated by a signal.
1010 int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
1011 unsigned int num_shared)
1013 struct xe_vm *vm = xe_vma_vm(vma);
1014 struct xe_bo *bo = xe_vma_bo(vma);
1019 err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
1021 err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1022 if (!err && bo && !bo->vm) {
1024 err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
1026 err = drm_exec_lock_obj(exec, &bo->ttm.base);
1032 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1034 struct drm_exec exec;
1037 drm_exec_init(&exec, 0, 0);
1038 drm_exec_until_all_locked(&exec) {
1039 err = xe_vm_prepare_vma(&exec, vma, 0);
1040 drm_exec_retry_on_contention(&exec);
1041 if (XE_WARN_ON(err))
1045 xe_vma_destroy(vma, NULL);
1047 drm_exec_fini(&exec);
1051 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1053 struct drm_gpuva *gpuva;
1055 lockdep_assert_held(&vm->lock);
1057 if (xe_vm_is_closed_or_banned(vm))
1060 xe_assert(vm->xe, start + range <= vm->size);
1062 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1064 return gpuva ? gpuva_to_vma(gpuva) : NULL;
1067 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1071 xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1072 lockdep_assert_held(&vm->lock);
1074 mutex_lock(&vm->snap_mutex);
1075 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1076 mutex_unlock(&vm->snap_mutex);
1077 XE_WARN_ON(err); /* Shouldn't be possible */
1082 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1084 xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1085 lockdep_assert_held(&vm->lock);
1087 mutex_lock(&vm->snap_mutex);
1088 drm_gpuva_remove(&vma->gpuva);
1089 mutex_unlock(&vm->snap_mutex);
1090 if (vm->usm.last_fault_vma == vma)
1091 vm->usm.last_fault_vma = NULL;
1094 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1096 struct xe_vma_op *op;
1098 op = kzalloc(sizeof(*op), GFP_KERNEL);
1106 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1108 static const struct drm_gpuvm_ops gpuvm_ops = {
1109 .op_alloc = xe_vm_op_alloc,
1110 .vm_bo_validate = xe_gpuvm_validate,
1111 .vm_free = xe_vm_free,
1114 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1118 if (pat_index & BIT(0))
1119 pte |= XE_PPGTT_PTE_PAT0;
1121 if (pat_index & BIT(1))
1122 pte |= XE_PPGTT_PTE_PAT1;
1127 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1132 if (pat_index & BIT(0))
1133 pte |= XE_PPGTT_PTE_PAT0;
1135 if (pat_index & BIT(1))
1136 pte |= XE_PPGTT_PTE_PAT1;
1138 if (pat_index & BIT(2)) {
1140 pte |= XE_PPGTT_PDE_PDPE_PAT2;
1142 pte |= XE_PPGTT_PTE_PAT2;
1145 if (pat_index & BIT(3))
1146 pte |= XELPG_PPGTT_PTE_PAT3;
1148 if (pat_index & (BIT(4)))
1149 pte |= XE2_PPGTT_PTE_PAT4;
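/*
 * Worked example (illustrative): a pat_index of 5 (0b0101) sets
 * XE_PPGTT_PTE_PAT0 plus either XE_PPGTT_PTE_PAT2 or XE_PPGTT_PDE_PDPE_PAT2,
 * depending on the level check above, while PAT1, PAT3 and PAT4 stay clear.
 */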
1154 static u64 pte_encode_ps(u32 pt_level)
1156 XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1159 return XE_PDE_PS_2M;
1160 else if (pt_level == 2)
1161 return XE_PDPE_PS_1G;
1166 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1167 const u16 pat_index)
1169 struct xe_device *xe = xe_bo_device(bo);
1172 pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1173 pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1174 pde |= pde_encode_pat_index(xe, pat_index);
1179 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1180 u16 pat_index, u32 pt_level)
1182 struct xe_device *xe = xe_bo_device(bo);
1185 pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1186 pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1187 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1188 pte |= pte_encode_ps(pt_level);
1190 if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1191 pte |= XE_PPGTT_PTE_DM;
1196 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1197 u16 pat_index, u32 pt_level)
1199 struct xe_device *xe = xe_vma_vm(vma)->xe;
1201 pte |= XE_PAGE_PRESENT;
1203 if (likely(!xe_vma_read_only(vma)))
1206 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1207 pte |= pte_encode_ps(pt_level);
1209 if (unlikely(xe_vma_is_null(vma)))
1215 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1217 u32 pt_level, bool devmem, u64 flags)
1221 /* Avoid passing random bits directly as flags */
1222 xe_assert(xe, !(flags & ~XE_PTE_PS64));
1225 pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1226 pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1227 pte |= pte_encode_ps(pt_level);
1230 pte |= XE_PPGTT_PTE_DM;
1237 static const struct xe_pt_ops xelp_pt_ops = {
1238 .pte_encode_bo = xelp_pte_encode_bo,
1239 .pte_encode_vma = xelp_pte_encode_vma,
1240 .pte_encode_addr = xelp_pte_encode_addr,
1241 .pde_encode_bo = xelp_pde_encode_bo,
1244 static void vm_destroy_work_func(struct work_struct *w);
1247 * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1248 * given tile and vm.
1250 * @tile: tile to set up for.
1251 * @vm: vm to set up for.
1253 * Sets up a pagetable tree with one page-table per level and a single
1254 * leaf PTE. All pagetable entries point to the single page-table or,
1255 * for MAX_HUGEPTE_LEVEL, a NULL huge PTE that returns 0 on reads and
1256 * turns writes into NOPs.
1258 * Return: 0 on success, negative error code on error.
1260 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1266 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1267 vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1268 if (IS_ERR(vm->scratch_pt[id][i]))
1269 return PTR_ERR(vm->scratch_pt[id][i]);
1271 xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1277 static void xe_vm_free_scratch(struct xe_vm *vm)
1279 struct xe_tile *tile;
1282 if (!xe_vm_has_scratch(vm))
1285 for_each_tile(tile, vm->xe, id) {
1288 if (!vm->pt_root[id])
1291 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1292 if (vm->scratch_pt[id][i])
1293 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1297 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1299 struct drm_gem_object *vm_resv_obj;
1301 int err, number_tiles = 0;
1302 struct xe_tile *tile;
1305 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1307 return ERR_PTR(-ENOMEM);
1311 vm->size = 1ull << xe->info.va_bits;
1315 init_rwsem(&vm->lock);
1316 mutex_init(&vm->snap_mutex);
1318 INIT_LIST_HEAD(&vm->rebind_list);
1320 INIT_LIST_HEAD(&vm->userptr.repin_list);
1321 INIT_LIST_HEAD(&vm->userptr.invalidated);
1322 init_rwsem(&vm->userptr.notifier_lock);
1323 spin_lock_init(&vm->userptr.invalidated_lock);
1325 INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1327 INIT_LIST_HEAD(&vm->preempt.exec_queues);
1328 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
1330 for_each_tile(tile, xe, id)
1331 xe_range_fence_tree_init(&vm->rftree[id]);
1333 vm->pt_ops = &xelp_pt_ops;
1335 if (!(flags & XE_VM_FLAG_MIGRATION))
1336 xe_device_mem_access_get(xe);
1338 vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1344 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1345 vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1347 drm_gem_object_put(vm_resv_obj);
1349 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1353 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1354 vm->flags |= XE_VM_FLAG_64K;
1356 for_each_tile(tile, xe, id) {
1357 if (flags & XE_VM_FLAG_MIGRATION &&
1358 tile->id != XE_VM_FLAG_TILE_ID(flags))
1361 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1362 if (IS_ERR(vm->pt_root[id])) {
1363 err = PTR_ERR(vm->pt_root[id]);
1364 vm->pt_root[id] = NULL;
1365 goto err_unlock_close;
1369 if (xe_vm_has_scratch(vm)) {
1370 for_each_tile(tile, xe, id) {
1371 if (!vm->pt_root[id])
1374 err = xe_vm_create_scratch(xe, tile, vm);
1376 goto err_unlock_close;
1378 vm->batch_invalidate_tlb = true;
1381 if (flags & XE_VM_FLAG_LR_MODE) {
1382 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1383 vm->flags |= XE_VM_FLAG_LR_MODE;
1384 vm->batch_invalidate_tlb = false;
1387 /* Fill pt_root after allocating scratch tables */
1388 for_each_tile(tile, xe, id) {
1389 if (!vm->pt_root[id])
1392 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1394 dma_resv_unlock(xe_vm_resv(vm));
1396 /* Kernel migration VM shouldn't have a circular loop.. */
1397 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1398 for_each_tile(tile, xe, id) {
1399 struct xe_gt *gt = tile->primary_gt;
1400 struct xe_vm *migrate_vm;
1401 struct xe_exec_queue *q;
1402 u32 create_flags = EXEC_QUEUE_FLAG_VM;
1404 if (!vm->pt_root[id])
1407 migrate_vm = xe_migrate_get_vm(tile->migrate);
1408 q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1409 XE_ENGINE_CLASS_COPY,
1411 xe_vm_put(migrate_vm);
1421 if (number_tiles > 1)
1422 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1424 mutex_lock(&xe->usm.lock);
1425 if (flags & XE_VM_FLAG_FAULT_MODE)
1426 xe->usm.num_vm_in_fault_mode++;
1427 else if (!(flags & XE_VM_FLAG_MIGRATION))
1428 xe->usm.num_vm_in_non_fault_mode++;
1429 mutex_unlock(&xe->usm.lock);
1431 trace_xe_vm_create(vm);
1436 dma_resv_unlock(xe_vm_resv(vm));
1438 xe_vm_close_and_put(vm);
1439 return ERR_PTR(err);
1442 mutex_destroy(&vm->snap_mutex);
1443 for_each_tile(tile, xe, id)
1444 xe_range_fence_tree_fini(&vm->rftree[id]);
1446 if (!(flags & XE_VM_FLAG_MIGRATION))
1447 xe_device_mem_access_put(xe);
1448 return ERR_PTR(err);
1451 static void xe_vm_close(struct xe_vm *vm)
1453 down_write(&vm->lock);
1455 up_write(&vm->lock);
1458 void xe_vm_close_and_put(struct xe_vm *vm)
1460 LIST_HEAD(contested);
1461 struct xe_device *xe = vm->xe;
1462 struct xe_tile *tile;
1463 struct xe_vma *vma, *next_vma;
1464 struct drm_gpuva *gpuva, *next;
1467 xe_assert(xe, !vm->preempt.num_exec_queues);
1470 if (xe_vm_in_preempt_fence_mode(vm))
1471 flush_work(&vm->preempt.rebind_work);
1473 down_write(&vm->lock);
1474 for_each_tile(tile, xe, id) {
1476 xe_exec_queue_last_fence_put(vm->q[id], vm);
1478 up_write(&vm->lock);
1480 for_each_tile(tile, xe, id) {
1482 xe_exec_queue_kill(vm->q[id]);
1483 xe_exec_queue_put(vm->q[id]);
1488 down_write(&vm->lock);
1489 xe_vm_lock(vm, false);
1490 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1491 vma = gpuva_to_vma(gpuva);
1493 if (xe_vma_has_no_bo(vma)) {
1494 down_read(&vm->userptr.notifier_lock);
1495 vma->gpuva.flags |= XE_VMA_DESTROYED;
1496 up_read(&vm->userptr.notifier_lock);
1499 xe_vm_remove_vma(vm, vma);
1501 /* easy case, remove from VMA? */
1502 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1503 list_del_init(&vma->combined_links.rebind);
1504 xe_vma_destroy(vma, NULL);
1508 list_move_tail(&vma->combined_links.destroy, &contested);
1509 vma->gpuva.flags |= XE_VMA_DESTROYED;
1513 * All vm operations will add shared fences to resv.
1514 * The only exception is eviction for a shared object,
1515 * but even so, the unbind when evicted would still
1516 * install a fence to resv. Hence it's safe to
1517 * destroy the pagetables immediately.
1519 xe_vm_free_scratch(vm);
1521 for_each_tile(tile, xe, id) {
1522 if (vm->pt_root[id]) {
1523 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1524 vm->pt_root[id] = NULL;
1530 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1531 * Since we hold a refcount to the bo, we can remove and free
1532 * the members safely without locking.
1534 list_for_each_entry_safe(vma, next_vma, &contested,
1535 combined_links.destroy) {
1536 list_del_init(&vma->combined_links.destroy);
1537 xe_vma_destroy_unlocked(vma);
1540 up_write(&vm->lock);
1542 mutex_lock(&xe->usm.lock);
1543 if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1544 xe->usm.num_vm_in_fault_mode--;
1545 else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1546 xe->usm.num_vm_in_non_fault_mode--;
1547 mutex_unlock(&xe->usm.lock);
1549 for_each_tile(tile, xe, id)
1550 xe_range_fence_tree_fini(&vm->rftree[id]);
1555 static void vm_destroy_work_func(struct work_struct *w)
1558 container_of(w, struct xe_vm, destroy_work);
1559 struct xe_device *xe = vm->xe;
1560 struct xe_tile *tile;
1564 /* xe_vm_close_and_put was not called? */
1565 xe_assert(xe, !vm->size);
1567 mutex_destroy(&vm->snap_mutex);
1569 if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1570 xe_device_mem_access_put(xe);
1572 if (xe->info.has_asid && vm->usm.asid) {
1573 mutex_lock(&xe->usm.lock);
1574 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1575 xe_assert(xe, lookup == vm);
1576 mutex_unlock(&xe->usm.lock);
1580 for_each_tile(tile, xe, id)
1581 XE_WARN_ON(vm->pt_root[id]);
1583 trace_xe_vm_free(vm);
1587 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1589 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1591 /* To destroy the VM we need to be able to sleep */
1592 queue_work(system_unbound_wq, &vm->destroy_work);
1595 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1599 mutex_lock(&xef->vm.lock);
1600 vm = xa_load(&xef->vm.xa, id);
1603 mutex_unlock(&xef->vm.lock);
1608 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1610 return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1611 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1614 static struct xe_exec_queue *
1615 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1617 return q ? q : vm->q[0];
1620 static struct dma_fence *
1621 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1622 struct xe_sync_entry *syncs, u32 num_syncs,
1623 bool first_op, bool last_op)
1625 struct xe_vm *vm = xe_vma_vm(vma);
1626 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1627 struct xe_tile *tile;
1628 struct dma_fence *fence = NULL;
1629 struct dma_fence **fences = NULL;
1630 struct dma_fence_array *cf = NULL;
1631 int cur_fence = 0, i;
1632 int number_tiles = hweight8(vma->tile_present);
1636 trace_xe_vma_unbind(vma);
1639 struct xe_user_fence * const f = vma->ufence;
1641 if (!xe_sync_ufence_get_status(f))
1642 return ERR_PTR(-EBUSY);
1645 xe_sync_ufence_put(f);
1648 if (number_tiles > 1) {
1649 fences = kmalloc_array(number_tiles, sizeof(*fences),
1652 return ERR_PTR(-ENOMEM);
1655 for_each_tile(tile, vm->xe, id) {
1656 if (!(vma->tile_present & BIT(id)))
1659 fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1660 first_op ? syncs : NULL,
1661 first_op ? num_syncs : 0);
1662 if (IS_ERR(fence)) {
1663 err = PTR_ERR(fence);
1668 fences[cur_fence++] = fence;
1671 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1672 q = list_next_entry(q, multi_gt_list);
1676 cf = dma_fence_array_create(number_tiles, fences,
1677 vm->composite_fence_ctx,
1678 vm->composite_fence_seqno++,
1681 --vm->composite_fence_seqno;
1687 fence = cf ? &cf->base : !fence ?
1688 xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1690 for (i = 0; i < num_syncs; i++)
1691 xe_sync_entry_signal(&syncs[i], NULL, fence);
1699 dma_fence_put(fences[--cur_fence]);
1703 return ERR_PTR(err);
1706 static struct dma_fence *
1707 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1708 struct xe_sync_entry *syncs, u32 num_syncs,
1709 bool first_op, bool last_op)
1711 struct xe_tile *tile;
1712 struct dma_fence *fence;
1713 struct dma_fence **fences = NULL;
1714 struct dma_fence_array *cf = NULL;
1715 struct xe_vm *vm = xe_vma_vm(vma);
1716 int cur_fence = 0, i;
1717 int number_tiles = hweight8(vma->tile_mask);
1721 trace_xe_vma_bind(vma);
1723 if (number_tiles > 1) {
1724 fences = kmalloc_array(number_tiles, sizeof(*fences),
1727 return ERR_PTR(-ENOMEM);
1730 for_each_tile(tile, vm->xe, id) {
1731 if (!(vma->tile_mask & BIT(id)))
1734 fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1735 first_op ? syncs : NULL,
1736 first_op ? num_syncs : 0,
1737 vma->tile_present & BIT(id));
1738 if (IS_ERR(fence)) {
1739 err = PTR_ERR(fence);
1744 fences[cur_fence++] = fence;
1747 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1748 q = list_next_entry(q, multi_gt_list);
1752 cf = dma_fence_array_create(number_tiles, fences,
1753 vm->composite_fence_ctx,
1754 vm->composite_fence_seqno++,
1757 --vm->composite_fence_seqno;
1764 for (i = 0; i < num_syncs; i++)
1765 xe_sync_entry_signal(&syncs[i], NULL,
1766 cf ? &cf->base : fence);
1769 return cf ? &cf->base : fence;
1774 dma_fence_put(fences[--cur_fence]);
1778 return ERR_PTR(err);
1781 static struct xe_user_fence *
1782 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1786 for (i = 0; i < num_syncs; i++) {
1787 struct xe_sync_entry *e = &syncs[i];
1789 if (xe_sync_is_ufence(e))
1790 return xe_sync_ufence_get(e);
1796 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1797 struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1798 u32 num_syncs, bool immediate, bool first_op,
1801 struct dma_fence *fence;
1802 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1803 struct xe_user_fence *ufence;
1805 xe_vm_assert_held(vm);
1807 ufence = find_ufence_get(syncs, num_syncs);
1808 if (vma->ufence && ufence)
1809 xe_sync_ufence_put(vma->ufence);
1811 vma->ufence = ufence ?: vma->ufence;
1814 fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1817 return PTR_ERR(fence);
1821 xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1823 fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1825 for (i = 0; i < num_syncs; i++)
1826 xe_sync_entry_signal(&syncs[i], NULL, fence);
1831 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1832 dma_fence_put(fence);
1837 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1838 struct xe_bo *bo, struct xe_sync_entry *syncs,
1839 u32 num_syncs, bool immediate, bool first_op,
1844 xe_vm_assert_held(vm);
1845 xe_bo_assert_held(bo);
1847 if (bo && immediate) {
1848 err = xe_bo_validate(bo, vm, true);
1853 return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
1857 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1858 struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1859 u32 num_syncs, bool first_op, bool last_op)
1861 struct dma_fence *fence;
1862 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1864 xe_vm_assert_held(vm);
1865 xe_bo_assert_held(xe_vma_bo(vma));
1867 fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1869 return PTR_ERR(fence);
1871 xe_vma_destroy(vma, fence);
1873 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1874 dma_fence_put(fence);
1879 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1880 DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1881 DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1883 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1884 struct drm_file *file)
1886 struct xe_device *xe = to_xe_device(dev);
1887 struct xe_file *xef = to_xe_file(file);
1888 struct drm_xe_vm_create *args = data;
1889 struct xe_tile *tile;
1895 if (XE_IOCTL_DBG(xe, args->extensions))
1898 if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1899 args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1901 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1905 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1908 if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1911 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1912 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1915 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1916 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1919 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1920 xe_device_in_non_fault_mode(xe)))
1923 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1924 xe_device_in_fault_mode(xe)))
1927 if (XE_IOCTL_DBG(xe, args->extensions))
1930 if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1931 flags |= XE_VM_FLAG_SCRATCH_PAGE;
1932 if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1933 flags |= XE_VM_FLAG_LR_MODE;
1934 if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1935 flags |= XE_VM_FLAG_FAULT_MODE;
1937 vm = xe_vm_create(xe, flags);
1941 mutex_lock(&xef->vm.lock);
1942 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1943 mutex_unlock(&xef->vm.lock);
1945 goto err_close_and_put;
1947 if (xe->info.has_asid) {
1948 mutex_lock(&xe->usm.lock);
1949 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1950 XA_LIMIT(1, XE_MAX_ASID - 1),
1951 &xe->usm.next_asid, GFP_KERNEL);
1952 mutex_unlock(&xe->usm.lock);
1956 vm->usm.asid = asid;
1962 /* Record BO memory for VM pagetable created against client */
1963 for_each_tile(tile, xe, id)
1964 if (vm->pt_root[id])
1965 xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1967 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1968 /* Warning: Security issue - never enable by default */
1969 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1975 mutex_lock(&xef->vm.lock);
1976 xa_erase(&xef->vm.xa, id);
1977 mutex_unlock(&xef->vm.lock);
1979 xe_vm_close_and_put(vm);
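/*
 * Userspace-side sketch (illustrative; assumes libdrm's drmIoctl() and the
 * uAPI structures from xe_drm.h):
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_FLAG_LR_MODE |
 *			 DRM_XE_VM_CREATE_FLAG_FAULT_MODE,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *		return -errno;
 *	vm_id = create.vm_id;
 *
 * Per the checks above, FAULT_MODE is only valid together with LR_MODE and
 * is rejected when combined with SCRATCH_PAGE.
 */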
1984 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1985 struct drm_file *file)
1987 struct xe_device *xe = to_xe_device(dev);
1988 struct xe_file *xef = to_xe_file(file);
1989 struct drm_xe_vm_destroy *args = data;
1993 if (XE_IOCTL_DBG(xe, args->pad) ||
1994 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1997 mutex_lock(&xef->vm.lock);
1998 vm = xa_load(&xef->vm.xa, args->vm_id);
1999 if (XE_IOCTL_DBG(xe, !vm))
2001 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2004 xa_erase(&xef->vm.xa, args->vm_id);
2005 mutex_unlock(&xef->vm.lock);
2008 xe_vm_close_and_put(vm);
2013 static const u32 region_to_mem_type[] = {
2019 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2020 struct xe_exec_queue *q, u32 region,
2021 struct xe_sync_entry *syncs, u32 num_syncs,
2022 bool first_op, bool last_op)
2024 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
2027 xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
2029 if (!xe_vma_has_no_bo(vma)) {
2030 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2035 if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
2036 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
2037 true, first_op, last_op);
2041 /* Nothing to do, signal fences now */
2043 for (i = 0; i < num_syncs; i++) {
2044 struct dma_fence *fence =
2045 xe_exec_queue_last_fence_get(wait_exec_queue, vm);
2047 xe_sync_entry_signal(&syncs[i], NULL, fence);
2048 dma_fence_put(fence);
2056 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2059 down_read(&vm->userptr.notifier_lock);
2060 vma->gpuva.flags |= XE_VMA_DESTROYED;
2061 up_read(&vm->userptr.notifier_lock);
2063 xe_vm_remove_vma(vm, vma);
2067 #define ULL unsigned long long
2069 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2070 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2075 case DRM_GPUVA_OP_MAP:
2076 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2077 (ULL)op->map.va.addr, (ULL)op->map.va.range);
2079 case DRM_GPUVA_OP_REMAP:
2080 vma = gpuva_to_vma(op->remap.unmap->va);
2081 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2082 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2083 op->remap.unmap->keep ? 1 : 0);
2086 "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2087 (ULL)op->remap.prev->va.addr,
2088 (ULL)op->remap.prev->va.range);
2091 "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2092 (ULL)op->remap.next->va.addr,
2093 (ULL)op->remap.next->va.range);
2095 case DRM_GPUVA_OP_UNMAP:
2096 vma = gpuva_to_vma(op->unmap.va);
2097 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2098 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2099 op->unmap.keep ? 1 : 0);
2101 case DRM_GPUVA_OP_PREFETCH:
2102 vma = gpuva_to_vma(op->prefetch.va);
2103 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2104 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2107 drm_warn(&xe->drm, "NOT POSSIBLE");
2111 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2117 * Create the operations list from the IOCTL arguments and set up operation fields
2118 * so the parse and commit steps are decoupled from the IOCTL arguments. This step can fail.
2120 static struct drm_gpuva_ops *
2121 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2122 u64 bo_offset_or_userptr, u64 addr, u64 range,
2123 u32 operation, u32 flags,
2124 u32 prefetch_region, u16 pat_index)
2126 struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2127 struct drm_gpuva_ops *ops;
2128 struct drm_gpuva_op *__op;
2129 struct drm_gpuvm_bo *vm_bo;
2132 lockdep_assert_held_write(&vm->lock);
2134 vm_dbg(&vm->xe->drm,
2135 "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2136 operation, (ULL)addr, (ULL)range,
2137 (ULL)bo_offset_or_userptr);
2139 switch (operation) {
2140 case DRM_XE_VM_BIND_OP_MAP:
2141 case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2142 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2143 obj, bo_offset_or_userptr);
2145 case DRM_XE_VM_BIND_OP_UNMAP:
2146 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2148 case DRM_XE_VM_BIND_OP_PREFETCH:
2149 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2151 case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2152 xe_assert(vm->xe, bo);
2154 err = xe_bo_lock(bo, true);
2156 return ERR_PTR(err);
2158 vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2159 if (IS_ERR(vm_bo)) {
2161 return ERR_CAST(vm_bo);
2164 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2165 drm_gpuvm_bo_put(vm_bo);
2169 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2170 ops = ERR_PTR(-EINVAL);
2175 drm_gpuva_for_each_op(__op, ops) {
2176 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2178 if (__op->op == DRM_GPUVA_OP_MAP) {
2179 op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2180 op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
2181 op->map.pat_index = pat_index;
2182 } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2183 op->prefetch.region = prefetch_region;
2186 print_op(vm->xe, __op);
2192 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2193 u16 pat_index, unsigned int flags)
2195 struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2196 struct drm_exec exec;
2200 lockdep_assert_held_write(&vm->lock);
2203 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2204 drm_exec_until_all_locked(&exec) {
2207 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2208 drm_exec_retry_on_contention(&exec);
2211 err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2212 drm_exec_retry_on_contention(&exec);
2215 drm_exec_fini(&exec);
2216 return ERR_PTR(err);
2220 vma = xe_vma_create(vm, bo, op->gem.offset,
2221 op->va.addr, op->va.addr +
2222 op->va.range - 1, pat_index, flags);
2224 drm_exec_fini(&exec);
2226 if (xe_vma_is_userptr(vma)) {
2227 err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2229 prep_vma_destroy(vm, vma, false);
2230 xe_vma_destroy_unlocked(vma);
2231 return ERR_PTR(err);
2233 } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2234 err = add_preempt_fences(vm, bo);
2236 prep_vma_destroy(vm, vma, false);
2237 xe_vma_destroy_unlocked(vma);
2238 return ERR_PTR(err);
2245 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2247 if (vma->gpuva.flags & XE_VMA_PTE_1G)
2249 else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2251 else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2253 else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2256 return SZ_1G; /* Uninitialized, use max size */
2259 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2263 vma->gpuva.flags |= XE_VMA_PTE_1G;
2266 vma->gpuva.flags |= XE_VMA_PTE_2M;
2269 vma->gpuva.flags |= XE_VMA_PTE_64K;
2272 vma->gpuva.flags |= XE_VMA_PTE_4K;
2277 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2281 lockdep_assert_held_write(&vm->lock);
2283 switch (op->base.op) {
2284 case DRM_GPUVA_OP_MAP:
2285 err |= xe_vm_insert_vma(vm, op->map.vma);
2287 op->flags |= XE_VMA_OP_COMMITTED;
2289 case DRM_GPUVA_OP_REMAP:
2292 gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2294 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2296 op->flags |= XE_VMA_OP_COMMITTED;
2298 if (op->remap.prev) {
2299 err |= xe_vm_insert_vma(vm, op->remap.prev);
2301 op->flags |= XE_VMA_OP_PREV_COMMITTED;
2302 if (!err && op->remap.skip_prev) {
2303 op->remap.prev->tile_present =
2305 op->remap.prev = NULL;
2308 if (op->remap.next) {
2309 err |= xe_vm_insert_vma(vm, op->remap.next);
2311 op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2312 if (!err && op->remap.skip_next) {
2313 op->remap.next->tile_present =
2315 op->remap.next = NULL;
2319 /* Adjust for partial unbind after removing VMA from VM */
2321 op->base.remap.unmap->va->va.addr = op->remap.start;
2322 op->base.remap.unmap->va->va.range = op->remap.range;
2326 case DRM_GPUVA_OP_UNMAP:
2327 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2328 op->flags |= XE_VMA_OP_COMMITTED;
2330 case DRM_GPUVA_OP_PREFETCH:
2331 op->flags |= XE_VMA_OP_COMMITTED;
2334 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2341 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2342 struct drm_gpuva_ops *ops,
2343 struct xe_sync_entry *syncs, u32 num_syncs,
2344 struct list_head *ops_list, bool last)
2346 struct xe_device *xe = vm->xe;
2347 struct xe_vma_op *last_op = NULL;
2348 struct drm_gpuva_op *__op;
2351 lockdep_assert_held_write(&vm->lock);
2353 drm_gpuva_for_each_op(__op, ops) {
2354 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2356 bool first = list_empty(ops_list);
2357 unsigned int flags = 0;
2359 INIT_LIST_HEAD(&op->link);
2360 list_add_tail(&op->link, ops_list);
2363 op->flags |= XE_VMA_OP_FIRST;
2364 op->num_syncs = num_syncs;
2370 switch (op->base.op) {
2371 case DRM_GPUVA_OP_MAP:
2373 flags |= op->map.is_null ?
2374 VMA_CREATE_FLAG_IS_NULL : 0;
2375 flags |= op->map.dumpable ?
2376 VMA_CREATE_FLAG_DUMPABLE : 0;
2378 vma = new_vma(vm, &op->base.map, op->map.pat_index,
2381 return PTR_ERR(vma);
2386 case DRM_GPUVA_OP_REMAP:
2388 struct xe_vma *old =
2389 gpuva_to_vma(op->base.remap.unmap->va);
2391 op->remap.start = xe_vma_start(old);
2392 op->remap.range = xe_vma_size(old);
2394 if (op->base.remap.prev) {
2395 flags |= op->base.remap.unmap->va->flags &
2397 VMA_CREATE_FLAG_READ_ONLY : 0;
2398 flags |= op->base.remap.unmap->va->flags &
2400 VMA_CREATE_FLAG_IS_NULL : 0;
2401 flags |= op->base.remap.unmap->va->flags &
2403 VMA_CREATE_FLAG_DUMPABLE : 0;
2405 vma = new_vma(vm, op->base.remap.prev,
2406 old->pat_index, flags);
2408 return PTR_ERR(vma);
2410 op->remap.prev = vma;
2413 * Userptr creates a new SG mapping so
2414 * we must also rebind.
2416 op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2417 IS_ALIGNED(xe_vma_end(vma),
2418 xe_vma_max_pte_size(old));
2419 if (op->remap.skip_prev) {
2420 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2424 op->remap.start = xe_vma_end(vma);
2425 vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2426 (ULL)op->remap.start,
2427 (ULL)op->remap.range);
2431 if (op->base.remap.next) {
2432 flags |= op->base.remap.unmap->va->flags &
2434 VMA_CREATE_FLAG_READ_ONLY : 0;
2435 flags |= op->base.remap.unmap->va->flags &
2437 VMA_CREATE_FLAG_IS_NULL : 0;
2438 flags |= op->base.remap.unmap->va->flags &
2440 VMA_CREATE_FLAG_DUMPABLE : 0;
2442 vma = new_vma(vm, op->base.remap.next,
2443 old->pat_index, flags);
2445 return PTR_ERR(vma);
2447 op->remap.next = vma;
2450 * Userptr creates a new SG mapping so
2451 * we must also rebind.
2453 op->remap.skip_next = !xe_vma_is_userptr(old) &&
2454 IS_ALIGNED(xe_vma_start(vma),
2455 xe_vma_max_pte_size(old));
2456 if (op->remap.skip_next) {
2457 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2461 vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2462 (ULL)op->remap.start,
2463 (ULL)op->remap.range);
2468 case DRM_GPUVA_OP_UNMAP:
2469 case DRM_GPUVA_OP_PREFETCH:
2473 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2478 err = xe_vma_op_commit(vm, op);
2483 /* FIXME: Unhandled corner case */
2484 XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2491 last_op->flags |= XE_VMA_OP_LAST;
2492 last_op->num_syncs = num_syncs;
2493 last_op->syncs = syncs;
2499 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2500 struct xe_vma *vma, struct xe_vma_op *op)
2504 lockdep_assert_held_write(&vm->lock);
2506 err = xe_vm_prepare_vma(exec, vma, 1);
2510 xe_vm_assert_held(vm);
2511 xe_bo_assert_held(xe_vma_bo(vma));
2513 switch (op->base.op) {
2514 case DRM_GPUVA_OP_MAP:
2515 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2516 op->syncs, op->num_syncs,
2517 !xe_vm_in_fault_mode(vm),
2518 op->flags & XE_VMA_OP_FIRST,
2519 op->flags & XE_VMA_OP_LAST);
2521 case DRM_GPUVA_OP_REMAP:
2523 bool prev = !!op->remap.prev;
2524 bool next = !!op->remap.next;
2526 if (!op->remap.unmap_done) {
2528 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2529 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2531 op->flags & XE_VMA_OP_FIRST,
2532 op->flags & XE_VMA_OP_LAST &&
2536 op->remap.unmap_done = true;
2540 op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2541 err = xe_vm_bind(vm, op->remap.prev, op->q,
2542 xe_vma_bo(op->remap.prev), op->syncs,
2543 op->num_syncs, true, false,
2544 op->flags & XE_VMA_OP_LAST && !next);
2545 op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2548 op->remap.prev = NULL;
2552 op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2553 err = xe_vm_bind(vm, op->remap.next, op->q,
2554 xe_vma_bo(op->remap.next),
2555 op->syncs, op->num_syncs,
2557 op->flags & XE_VMA_OP_LAST);
2558 op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2561 op->remap.next = NULL;
2566 case DRM_GPUVA_OP_UNMAP:
2567 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2568 op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2569 op->flags & XE_VMA_OP_LAST);
2571 case DRM_GPUVA_OP_PREFETCH:
2572 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2573 op->syncs, op->num_syncs,
2574 op->flags & XE_VMA_OP_FIRST,
2575 op->flags & XE_VMA_OP_LAST);
2578 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2582 trace_xe_vma_fail(vma);
2587 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2588 struct xe_vma_op *op)
2590 struct drm_exec exec;
2594 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2595 drm_exec_until_all_locked(&exec) {
2596 err = op_execute(&exec, vm, vma, op);
2597 drm_exec_retry_on_contention(&exec);
2601 drm_exec_fini(&exec);
2603 if (err == -EAGAIN) {
2604 lockdep_assert_held_write(&vm->lock);
2606 if (op->base.op == DRM_GPUVA_OP_REMAP) {
2607 if (!op->remap.unmap_done)
2608 vma = gpuva_to_vma(op->base.remap.unmap->va);
2609 else if (op->remap.prev)
2610 vma = op->remap.prev;
2612 vma = op->remap.next;
2615 if (xe_vma_is_userptr(vma)) {
2616 err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2620 trace_xe_vma_fail(vma);
2627 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2631 lockdep_assert_held_write(&vm->lock);
2633 switch (op->base.op) {
2634 case DRM_GPUVA_OP_MAP:
2635 ret = __xe_vma_op_execute(vm, op->map.vma, op);
2637 case DRM_GPUVA_OP_REMAP:
2641 if (!op->remap.unmap_done)
2642 vma = gpuva_to_vma(op->base.remap.unmap->va);
2643 else if (op->remap.prev)
2644 vma = op->remap.prev;
2646 vma = op->remap.next;
2648 ret = __xe_vma_op_execute(vm, vma, op);
2651 case DRM_GPUVA_OP_UNMAP:
2652 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2655 case DRM_GPUVA_OP_PREFETCH:
2656 ret = __xe_vma_op_execute(vm,
2657 gpuva_to_vma(op->base.prefetch.va),
2661 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2667 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2669 bool last = op->flags & XE_VMA_OP_LAST;
2672 while (op->num_syncs--)
2673 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2676 xe_exec_queue_put(op->q);
2678 if (!list_empty(&op->link))
2679 list_del(&op->link);
2681 drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2686 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2687 bool post_commit, bool prev_post_commit,
2688 bool next_post_commit)
2689 {
2690 lockdep_assert_held_write(&vm->lock);
2691 
2692 switch (op->base.op) {
2693 case DRM_GPUVA_OP_MAP:
2694 if (op->map.vma) {
2695 prep_vma_destroy(vm, op->map.vma, post_commit);
2696 xe_vma_destroy_unlocked(op->map.vma);
2697 }
2698 break;
2699 case DRM_GPUVA_OP_UNMAP:
2700 {
2701 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2702 
2703 if (vma) {
2704 down_read(&vm->userptr.notifier_lock);
2705 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2706 up_read(&vm->userptr.notifier_lock);
2707 if (post_commit)
2708 xe_vm_insert_vma(vm, vma);
2709 }
2710 break;
2711 }
2712 case DRM_GPUVA_OP_REMAP:
2713 {
2714 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2715 
2716 if (op->remap.prev) {
2717 prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2718 xe_vma_destroy_unlocked(op->remap.prev);
2719 }
2720 if (op->remap.next) {
2721 prep_vma_destroy(vm, op->remap.next, next_post_commit);
2722 xe_vma_destroy_unlocked(op->remap.next);
2723 }
2724 if (vma) {
2725 down_read(&vm->userptr.notifier_lock);
2726 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2727 up_read(&vm->userptr.notifier_lock);
2728 if (post_commit)
2729 xe_vm_insert_vma(vm, vma);
2730 }
2731 break;
2732 }
2733 case DRM_GPUVA_OP_PREFETCH:
2734 /* Nothing to do */
2735 break;
2736 default:
2737 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2738 }
2739 }
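/*
 * Walk the per-bind drm_gpuva_ops lists in reverse order, unwind every
 * committed operation and free the ops themselves.
 */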
2741 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2742 struct drm_gpuva_ops **ops,
2743 int num_ops_list)
2744 {
2745 int i;
2746 
2747 for (i = num_ops_list - 1; i >= 0; --i) {
2748 struct drm_gpuva_ops *__ops = ops[i];
2749 struct drm_gpuva_op *__op;
2750 
2751 if (!__ops)
2752 continue;
2753 
2754 drm_gpuva_for_each_op_reverse(__op, __ops) {
2755 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2756 
2757 xe_vma_op_unwind(vm, op,
2758 op->flags & XE_VMA_OP_COMMITTED,
2759 op->flags & XE_VMA_OP_PREV_COMMITTED,
2760 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2761 }
2762 
2763 drm_gpuva_ops_free(&vm->gpuvm, __ops);
2764 }
2765 }
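/*
 * Execute all parsed bind operations in list order. As the FIXME below
 * notes, a failure here is currently fatal for the VM rather than being
 * unwound.
 */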
2767 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2768 struct list_head *ops_list)
2769 {
2770 struct xe_vma_op *op, *next;
2771 int err;
2772 
2773 lockdep_assert_held_write(&vm->lock);
2774 
2775 list_for_each_entry_safe(op, next, ops_list, link) {
2776 err = xe_vma_op_execute(vm, op);
2777 if (err) {
2778 drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2779 op->base.op, err);
2780 /*
2781 * FIXME: Killing VM rather than proper error handling
2782 */
2783 xe_vm_kill(vm);
2784 return -ENOSPC;
2785 }
2786 xe_vma_op_cleanup(vm, op);
2787 }
2788 
2789 return 0;
2790 }
2792 #define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \
2793 DRM_XE_VM_BIND_FLAG_DUMPABLE)
2794 #define XE_64K_PAGE_MASK 0xffffull
2795 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
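/*
 * Validate the user-supplied bind arguments and copy the per-bind op array
 * from user space. On success *bind_ops points either at the single inline
 * op or, when args->num_binds > 1, at a kvmalloc'ed array that the caller
 * must free.
 */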
2797 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2798 struct drm_xe_vm_bind *args,
2799 struct drm_xe_vm_bind_op **bind_ops)
2800 {
2801 int err;
2802 int i;
2803 
2804 if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2805 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2806 return -EINVAL;
2807 
2808 if (XE_IOCTL_DBG(xe, args->extensions))
2809 return -EINVAL;
2810 
2811 if (args->num_binds > 1) {
2812 u64 __user *bind_user =
2813 u64_to_user_ptr(args->vector_of_binds);
2815 *bind_ops = kvmalloc_array(args->num_binds,
2816 sizeof(struct drm_xe_vm_bind_op),
2817 GFP_KERNEL | __GFP_ACCOUNT);
2818 if (!*bind_ops)
2819 return -ENOMEM;
2820 
2821 err = __copy_from_user(*bind_ops, bind_user,
2822 sizeof(struct drm_xe_vm_bind_op) *
2823 args->num_binds);
2824 if (XE_IOCTL_DBG(xe, err)) {
2825 err = -EFAULT;
2826 goto free_bind_ops;
2827 }
2828 } else {
2829 *bind_ops = &args->bind;
2830 }
2831 
2832 for (i = 0; i < args->num_binds; ++i) {
2833 u64 range = (*bind_ops)[i].range;
2834 u64 addr = (*bind_ops)[i].addr;
2835 u32 op = (*bind_ops)[i].op;
2836 u32 flags = (*bind_ops)[i].flags;
2837 u32 obj = (*bind_ops)[i].obj;
2838 u64 obj_offset = (*bind_ops)[i].obj_offset;
2839 u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2840 bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2841 u16 pat_index = (*bind_ops)[i].pat_index;
2842 u16 coh_mode;
2843 
2844 if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2845 err = -EINVAL;
2846 goto free_bind_ops;
2847 }
2848 
2849 pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2850 (*bind_ops)[i].pat_index = pat_index;
2851 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2852 if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2853 err = -EINVAL;
2854 goto free_bind_ops;
2855 }
2856 
2857 if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2858 err = -EINVAL;
2859 goto free_bind_ops;
2860 }
2861 
2862 if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2863 XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2864 XE_IOCTL_DBG(xe, obj && is_null) ||
2865 XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2866 XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2868 XE_IOCTL_DBG(xe, !obj &&
2869 op == DRM_XE_VM_BIND_OP_MAP &&
2871 XE_IOCTL_DBG(xe, !obj &&
2872 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2873 XE_IOCTL_DBG(xe, addr &&
2874 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2875 XE_IOCTL_DBG(xe, range &&
2876 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2877 XE_IOCTL_DBG(xe, obj &&
2878 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2879 XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2880 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2881 XE_IOCTL_DBG(xe, obj &&
2882 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2883 XE_IOCTL_DBG(xe, prefetch_region &&
2884 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2885 XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2886 xe->info.mem_region_mask)) ||
2887 XE_IOCTL_DBG(xe, obj &&
2888 op == DRM_XE_VM_BIND_OP_UNMAP)) {
2889 err = -EINVAL;
2890 goto free_bind_ops;
2891 }
2892 
2893 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2894 XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2895 XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2896 XE_IOCTL_DBG(xe, !range &&
2897 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
2898 err = -EINVAL;
2899 goto free_bind_ops;
2900 }
2901 }
2902 
2903 return 0;
2904 
2905 free_bind_ops:
2906 if (args->num_binds > 1)
2907 kvfree(*bind_ops);
2908 return err;
2909 }
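/*
 * Used when a bind has nothing left to do (-ENODATA): signal the user
 * syncs with a fence from the wait exec queue so ordering is still
 * observed by user space.
 */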
2911 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
2912 struct xe_exec_queue *q,
2913 struct xe_sync_entry *syncs,
2914 int num_syncs)
2915 {
2916 struct dma_fence *fence;
2917 int i, err = 0;
2918 
2919 fence = xe_sync_in_fence_get(syncs, num_syncs,
2920 to_wait_exec_queue(vm, q), vm);
2921 if (IS_ERR(fence))
2922 return PTR_ERR(fence);
2923 
2924 for (i = 0; i < num_syncs; i++)
2925 xe_sync_entry_signal(&syncs[i], NULL, fence);
2927 xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
2928 fence);
2929 dma_fence_put(fence);
2930 
2931 return err;
2932 }
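/*
 * Main entry point for the VM bind ioctl: validates the arguments, looks
 * up the VM, exec queue, BOs and syncs, builds drm_gpuva_ops for every
 * bind op, executes them and unwinds on failure.
 */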
2934 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2935 {
2936 struct xe_device *xe = to_xe_device(dev);
2937 struct xe_file *xef = to_xe_file(file);
2938 struct drm_xe_vm_bind *args = data;
2939 struct drm_xe_sync __user *syncs_user;
2940 struct xe_bo **bos = NULL;
2941 struct drm_gpuva_ops **ops = NULL;
2942 struct xe_vm *vm;
2943 struct xe_exec_queue *q = NULL;
2944 u32 num_syncs, num_ufence = 0;
2945 struct xe_sync_entry *syncs = NULL;
2946 struct drm_xe_vm_bind_op *bind_ops;
2947 LIST_HEAD(ops_list);
2948 int err;
2949 int i;
2950 
2951 err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2952 if (err)
2953 return err;
2954 
2955 if (args->exec_queue_id) {
2956 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2957 if (XE_IOCTL_DBG(xe, !q)) {
2958 err = -ENOENT;
2959 goto free_objs;
2960 }
2961 
2962 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2963 err = -EINVAL;
2964 goto put_exec_queue;
2965 }
2966 }
2967 
2968 vm = xe_vm_lookup(xef, args->vm_id);
2969 if (XE_IOCTL_DBG(xe, !vm)) {
2970 err = -EINVAL;
2971 goto put_exec_queue;
2972 }
2973 
2974 err = down_write_killable(&vm->lock);
2975 if (err)
2976 goto put_vm;
2977 
2978 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2979 err = -ENOENT;
2980 goto release_vm_lock;
2981 }
2982 
2983 for (i = 0; i < args->num_binds; ++i) {
2984 u64 range = bind_ops[i].range;
2985 u64 addr = bind_ops[i].addr;
2987 if (XE_IOCTL_DBG(xe, range > vm->size) ||
2988 XE_IOCTL_DBG(xe, addr > vm->size - range)) {
2989 err = -EINVAL;
2990 goto release_vm_lock;
2991 }
2992 }
2993 
2994 if (args->num_binds) {
2995 bos = kvcalloc(args->num_binds, sizeof(*bos),
2996 GFP_KERNEL | __GFP_ACCOUNT);
2997 if (!bos) {
2998 err = -ENOMEM;
2999 goto release_vm_lock;
3000 }
3001 
3002 ops = kvcalloc(args->num_binds, sizeof(*ops),
3003 GFP_KERNEL | __GFP_ACCOUNT);
3004 if (!ops) {
3005 err = -ENOMEM;
3006 goto release_vm_lock;
3007 }
3008 }
3009 
3010 for (i = 0; i < args->num_binds; ++i) {
3011 struct drm_gem_object *gem_obj;
3012 u64 range = bind_ops[i].range;
3013 u64 addr = bind_ops[i].addr;
3014 u32 obj = bind_ops[i].obj;
3015 u64 obj_offset = bind_ops[i].obj_offset;
3016 u16 pat_index = bind_ops[i].pat_index;
3017 u16 coh_mode;
3018 
3019 if (!obj)
3020 continue;
3021 
3022 gem_obj = drm_gem_object_lookup(file, obj);
3023 if (XE_IOCTL_DBG(xe, !gem_obj)) {
3024 err = -ENOENT;
3025 goto put_obj;
3026 }
3027 bos[i] = gem_to_xe_bo(gem_obj);
3028 
3029 if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3030 XE_IOCTL_DBG(xe, obj_offset >
3031 bos[i]->size - range)) {
3032 err = -EINVAL;
3033 goto put_obj;
3034 }
3035 
3036 if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3037 if (XE_IOCTL_DBG(xe, obj_offset &
3038 XE_64K_PAGE_MASK) ||
3039 XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3040 XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3041 err = -EINVAL;
3042 goto put_obj;
3043 }
3044 }
3045 
3046 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3047 if (bos[i]->cpu_caching) {
3048 if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3049 bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3050 err = -EINVAL;
3051 goto put_obj;
3052 }
3053 } else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3054 /*
3055 * Imported dma-buf from a different device should
3056 * require 1way or 2way coherency since we don't know
3057 * how it was mapped on the CPU. Just assume it is
3058 * potentially cached on the CPU side.
3059 */
3060 err = -EINVAL;
3061 goto put_obj;
3062 }
3063 }
3064 
3065 if (args->num_syncs) {
3066 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3067 if (!syncs) {
3068 err = -ENOMEM;
3069 goto put_obj;
3070 }
3071 }
3072 
3073 syncs_user = u64_to_user_ptr(args->syncs);
3074 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3075 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3076 &syncs_user[num_syncs],
3077 (xe_vm_in_lr_mode(vm) ?
3078 SYNC_PARSE_FLAG_LR_MODE : 0) |
3079 (!args->num_binds ?
3080 SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3081 if (err)
3082 goto free_syncs;
3083 
3084 if (xe_sync_is_ufence(&syncs[num_syncs]))
3085 num_ufence++;
3086 }
3087 
3088 if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3089 err = -EINVAL;
3090 goto free_syncs;
3091 }
3092 
3093 if (!args->num_binds) {
3094 err = -ENODATA;
3095 goto free_syncs;
3096 }
3097 
3098 for (i = 0; i < args->num_binds; ++i) {
3099 u64 range = bind_ops[i].range;
3100 u64 addr = bind_ops[i].addr;
3101 u32 op = bind_ops[i].op;
3102 u32 flags = bind_ops[i].flags;
3103 u64 obj_offset = bind_ops[i].obj_offset;
3104 u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3105 u16 pat_index = bind_ops[i].pat_index;
3107 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3108 addr, range, op, flags,
3109 prefetch_region, pat_index);
3110 if (IS_ERR(ops[i])) {
3111 err = PTR_ERR(ops[i]);
3112 ops[i] = NULL;
3113 goto unwind_ops;
3114 }
3115 
3116 err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3117 &ops_list,
3118 i == args->num_binds - 1);
3119 if (err)
3120 goto unwind_ops;
3121 }
3122 
3123 /* Nothing to do */
3124 if (list_empty(&ops_list)) {
3125 err = -ENODATA;
3126 goto unwind_ops;
3127 }
3128 
3129 xe_vm_get(vm);
3130 if (q)
3131 xe_exec_queue_get(q);
3132 
3133 err = vm_bind_ioctl_ops_execute(vm, &ops_list);
3134 
3135 up_write(&vm->lock);
3136 
3137 if (q)
3138 xe_exec_queue_put(q);
3139 xe_vm_put(vm);
3140 
3141 for (i = 0; bos && i < args->num_binds; ++i)
3142 xe_bo_put(bos[i]);
3143 
3144 kvfree(bos);
3145 kvfree(ops);
3146 if (args->num_binds > 1)
3147 kvfree(bind_ops);
3148 
3149 return err;
3150 
3151 unwind_ops:
3152 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3153 free_syncs:
3154 if (err == -ENODATA)
3155 err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3156 while (num_syncs--)
3157 xe_sync_entry_cleanup(&syncs[num_syncs]);
3158 
3159 kfree(syncs);
3160 put_obj:
3161 for (i = 0; i < args->num_binds; ++i)
3162 xe_bo_put(bos[i]);
3163 release_vm_lock:
3164 up_write(&vm->lock);
3165 put_vm:
3166 xe_vm_put(vm);
3167 put_exec_queue:
3168 if (q)
3169 xe_exec_queue_put(q);
3170 free_objs:
3171 kvfree(bos);
3172 kvfree(ops);
3173 if (args->num_binds > 1)
3174 kvfree(bind_ops);
3175 return err;
3176 }
3179 * xe_vm_lock() - Lock the vm's dma_resv object
3180 * @vm: The struct xe_vm whose lock is to be locked
3181 * @intr: Whether to perform any waits interruptibly
3182 *
3183 * Return: 0 on success, -EINTR if @intr is true and the wait for a
3184 * contended lock was interrupted. If @intr is false, the function
3185 * always succeeds and returns 0.
3186 */
3187 int xe_vm_lock(struct xe_vm *vm, bool intr)
3188 {
3189 if (intr)
3190 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3191 
3192 return dma_resv_lock(xe_vm_resv(vm), NULL);
3193 }
3194 
3195 /**
3196 * xe_vm_unlock() - Unlock the vm's dma_resv object
3197 * @vm: The struct xe_vm whose lock is to be released.
3198 *
3199 * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3200 */
3201 void xe_vm_unlock(struct xe_vm *vm)
3202 {
3203 dma_resv_unlock(xe_vm_resv(vm));
3204 }
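/*
 * Illustrative usage sketch (not taken from this file): callers are
 * expected to pair the two helpers around access to state protected by
 * the vm's dma_resv, e.g.
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... touch state protected by the vm's dma_resv ...
 *	xe_vm_unlock(vm);
 */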
3206 /**
3207 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3208 * @vma: VMA to invalidate
3209 *
3210 * Walks a list of page table leaves, zeroing the entries owned by this
3211 * VMA, invalidates the TLBs and blocks until the TLB invalidation is
3212 * complete.
3213 *
3214 * Returns 0 for success, negative error code otherwise.
3215 */
3216 int xe_vm_invalidate_vma(struct xe_vma *vma)
3217 {
3218 struct xe_device *xe = xe_vma_vm(vma)->xe;
3219 struct xe_tile *tile;
3220 u32 tile_needs_invalidate = 0;
3221 int seqno[XE_MAX_TILES_PER_DEVICE];
3222 u8 id;
3223 int ret;
3224 
3225 xe_assert(xe, !xe_vma_is_null(vma));
3226 trace_xe_vma_invalidate(vma);
3228 /* Check that we don't race with page-table updates */
3229 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3230 if (xe_vma_is_userptr(vma)) {
3231 WARN_ON_ONCE(!mmu_interval_check_retry
3232 (&to_userptr_vma(vma)->userptr.notifier,
3233 to_userptr_vma(vma)->userptr.notifier_seq));
3234 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3235 DMA_RESV_USAGE_BOOKKEEP));
3236 
3237 } else {
3238 xe_bo_assert_held(xe_vma_bo(vma));
3239 }
3240 }
3241 
3242 for_each_tile(tile, xe, id) {
3243 if (xe_pt_zap_ptes(tile, vma)) {
3244 tile_needs_invalidate |= BIT(id);
3247 * FIXME: We potentially need to invalidate multiple
3248 * GTs within the tile
3250 seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3251 if (seqno[id] < 0)
3252 return seqno[id];
3253 }
3254 }
3255 
3256 for_each_tile(tile, xe, id) {
3257 if (tile_needs_invalidate & BIT(id)) {
3258 ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3259 if (ret < 0)
3260 return ret;
3261 }
3262 }
3263 
3264 vma->tile_invalidated = vma->tile_mask;
3265 
3266 return 0;
3267 }
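/*
 * Dump the VM's page-table root and every GPUVA mapping (address range,
 * backing address and placement) to the given drm_printer.
 */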
3269 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3270 {
3271 struct drm_gpuva *gpuva;
3272 bool is_vram;
3273 u64 addr;
3274 
3275 if (!down_read_trylock(&vm->lock)) {
3276 drm_printf(p, " Failed to acquire VM lock to dump capture");
3277 return 0;
3278 }
3279 if (vm->pt_root[gt_id]) {
3280 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3281 is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3282 drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3283 is_vram ? "VRAM" : "SYS");
3284 }
3285 
3286 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3287 struct xe_vma *vma = gpuva_to_vma(gpuva);
3288 bool is_userptr = xe_vma_is_userptr(vma);
3289 bool is_null = xe_vma_is_null(vma);
3290 
3291 if (is_null) {
3292 addr = 0;
3293 } else if (is_userptr) {
3294 struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
3295 struct xe_res_cursor cur;
3296 
3297 if (sg) {
3298 xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
3299 addr = xe_res_dma(&cur);
3300 } else {
3301 addr = 0;
3302 }
3303 } else {
3304 addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3305 is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3306 }
3307 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3308 xe_vma_start(vma), xe_vma_end(vma) - 1,
3310 addr, is_null ? "NULL" : is_userptr ? "USR" :
3311 is_vram ? "VRAM" : "SYS");
3312 }
3313 
3314 up_read(&vm->lock);
3315 return 0;
3316 }
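/*
 * VM snapshotting for error capture: xe_vm_snapshot_capture() records the
 * dumpable mappings under vm->snap_mutex with GFP_NOWAIT, while the actual
 * contents are copied later, from sleepable context, by
 * xe_vm_snapshot_capture_delayed().
 */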
3318 struct xe_vm_snapshot {
3319 unsigned long num_snaps;
3320 struct {
3321 u64 ofs, bo_ofs;
3322 unsigned long len;
3323 struct xe_bo *bo;
3324 void *data;
3325 struct mm_struct *mm;
3326 } snap[];
3327 };
3329 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3330 {
3331 unsigned long num_snaps = 0, i;
3332 struct xe_vm_snapshot *snap = NULL;
3333 struct drm_gpuva *gpuva;
3338 mutex_lock(&vm->snap_mutex);
3339 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3340 if (gpuva->flags & XE_VMA_DUMPABLE)
3341 num_snaps++;
3342 }
3343 
3344 if (num_snaps)
3345 snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3346 if (!snap)
3347 goto out_unlock;
3348 
3349 snap->num_snaps = num_snaps;
3350 i = 0;
3351 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3352 struct xe_vma *vma = gpuva_to_vma(gpuva);
3353 struct xe_bo *bo = vma->gpuva.gem.obj ?
3354 gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3356 if (!(gpuva->flags & XE_VMA_DUMPABLE))
3357 continue;
3358 
3359 snap->snap[i].ofs = xe_vma_start(vma);
3360 snap->snap[i].len = xe_vma_size(vma);
3361 if (bo) {
3362 snap->snap[i].bo = xe_bo_get(bo);
3363 snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3364 } else if (xe_vma_is_userptr(vma)) {
3365 struct mm_struct *mm =
3366 to_userptr_vma(vma)->userptr.notifier.mm;
3368 if (mmget_not_zero(mm))
3369 snap->snap[i].mm = mm;
3370 else
3371 snap->snap[i].data = ERR_PTR(-EFAULT);
3372 
3373 snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3374 } else {
3375 snap->snap[i].data = ERR_PTR(-ENOENT);
3376 }
3377 
3378 i++;
3379 }
3380 out_unlock:
3381 mutex_unlock(&vm->snap_mutex);
3382 
3383 return snap;
3384 }
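/*
 * Second snapshot stage: copy the captured ranges, either by vmapping the
 * backing BO or by reading the userptr range through the saved mm.
 */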
3385 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3386 {
3387 for (int i = 0; i < snap->num_snaps; i++) {
3388 struct xe_bo *bo = snap->snap[i].bo;
3389 struct iosys_map src;
3390 int err;
3391 
3392 if (IS_ERR(snap->snap[i].data))
3393 continue;
3394 
3395 snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3396 if (!snap->snap[i].data) {
3397 snap->snap[i].data = ERR_PTR(-ENOMEM);
3398 goto cleanup_bo;
3399 }
3400 
3401 if (bo) {
3402 dma_resv_lock(bo->ttm.base.resv, NULL);
3403 err = ttm_bo_vmap(&bo->ttm, &src);
3404 if (!err) {
3405 xe_map_memcpy_from(xe_bo_device(bo),
3406 snap->snap[i].data,
3407 &src, snap->snap[i].bo_ofs,
3408 snap->snap[i].len);
3409 ttm_bo_vunmap(&bo->ttm, &src);
3410 }
3411 dma_resv_unlock(bo->ttm.base.resv);
3412 } else {
3413 void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
3414 
3415 kthread_use_mm(snap->snap[i].mm);
3416 if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3417 err = 0;
3418 else
3419 err = -EFAULT;
3420 kthread_unuse_mm(snap->snap[i].mm);
3421 
3422 mmput(snap->snap[i].mm);
3423 snap->snap[i].mm = NULL;
3424 }
3425 
3426 if (err) {
3427 kvfree(snap->snap[i].data);
3428 snap->snap[i].data = ERR_PTR(err);
3429 }
3430 
3431 cleanup_bo:
3432 xe_bo_put(bo);
3433 snap->snap[i].bo = NULL;
3434 }
3435 }
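/*
 * Print a captured snapshot: each range is emitted as an ascii85 encoded
 * blob; ranges that could not be captured are reported with their error.
 */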
3437 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3438 {
3439 unsigned long i, j;
3440 
3441 for (i = 0; i < snap->num_snaps; i++) {
3442 if (IS_ERR(snap->snap[i].data))
3443 goto uncaptured;
3444 
3445 drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3446 drm_printf(p, "[%llx].data: ",
3447 snap->snap[i].ofs);
3448 
3449 for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3450 u32 *val = snap->snap[i].data + j;
3451 char dumped[ASCII85_BUFSZ];
3453 drm_puts(p, ascii85_encode(*val, dumped));
3454 }
3455 
3456 drm_puts(p, "\n");
3457 continue;
3458 
3459 uncaptured:
3460 drm_printf(p, "Unable to capture range [%llx-%llx]: %li\n",
3461 snap->snap[i].ofs, snap->snap[i].ofs + snap->snap[i].len - 1,
3462 PTR_ERR(snap->snap[i].data));
3463 }
3464 }
3466 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3467 {
3468 unsigned long i;
3469 
3470 if (IS_ERR_OR_NULL(snap))
3471 return;
3472 
3473 for (i = 0; i < snap->num_snaps; i++) {
3474 if (!IS_ERR(snap->snap[i].data))
3475 kvfree(snap->snap[i].data);
3476 xe_bo_put(snap->snap[i].bo);
3477 if (snap->snap[i].mm)
3478 mmput(snap->snap[i].mm);