}
if (!rebind && last_munmap_rebind &&
xe_vm_in_compute_mode(vm))
- queue_work(vm->xe->ordered_wq,
- &vm->preempt.rebind_work);
+ xe_vm_queue_rebind_worker(vm);
} else {
kfree(rfence);
kfree(ifence);
struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm);
+/*
+ * Queue the VM's preempt rebind worker on the device-ordered workqueue.
+ * Only valid for compute-mode VMs (warns otherwise); callers presumably
+ * hold a reference keeping @vm alive until the work runs — confirm at
+ * the call sites.
+ */
+static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
+{
+	XE_WARN_ON(!xe_vm_in_compute_mode(vm));
+	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
+}
+
/**
* xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
* vms.
{
if (xe_vm_in_compute_mode(vm) && vm->preempt.rebind_deactivated) {
vm->preempt.rebind_deactivated = false;
- queue_work(system_unbound_wq, &vm->preempt.rebind_work);
+ xe_vm_queue_rebind_worker(vm);
}
}
int xe_vma_userptr_check_repin(struct xe_vma *vma);
-static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
-{
- XE_WARN_ON(!xe_vm_in_compute_mode(vm));
- queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
-}
-
/*
* XE_ONSTACK_TV is used to size the tv_onstack array that is input
* to xe_vm_lock_dma_resv() and xe_vm_unlock_dma_resv().