}
/* Free old fence */
- dma_fence_put(&old_ef->base);
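+ /* There may be no old fence to drop now that the eviction fence is created on demand */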
+ if (old_ef)
+ dma_fence_put(&old_ef->base);
return 0;
free_err:
struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
- struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_bo_va *bo_va;
- struct drm_exec exec;
- bool userq_active = amdgpu_userqueue_active(uq_mgr);
- int ret;
-
-
- /* For userqueues, the fence replacement happens in resume path */
- if (userq_active) {
- amdgpu_userqueue_suspend(uq_mgr);
- return;
- }
-
- /* Signal old eviction fence */
- amdgpu_eviction_fence_signal(evf_mgr);
-
- /* Do not replace eviction fence is fd is getting closed */
- if (evf_mgr->fd_closing)
- return;
-
- /* Prepare the objects to replace eviction fence */
- drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
- drm_exec_until_all_locked(&exec) {
- ret = amdgpu_vm_lock_pd(vm, &exec, 2);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
- goto unlock_drm;
-
- /* Lock the done list */
- list_for_each_entry(bo_va, &vm->done, base.vm_status) {
- struct amdgpu_bo *bo = bo_va->base.bo;
-
- if (!bo)
- continue;
-
- if (vm != bo_va->base.vm)
- continue;
+ struct amdgpu_eviction_fence *ev_fence;
- ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
- goto unlock_drm;
- }
- }
+ mutex_lock(&uq_mgr->userq_mutex);
+ ev_fence = evf_mgr->ev_fence;
+ if (!ev_fence)
+ goto unlock;
- /* Replace old eviction fence with new one */
- ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
- if (ret)
- DRM_ERROR("Failed to replace eviction fence\n");
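+ /* Suspend the queues; this signals the current eviction fence */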
+ amdgpu_userqueue_suspend(uq_mgr, ev_fence);
-unlock_drm:
- drm_exec_fini(&exec);
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
}
static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
.enable_signaling = amdgpu_eviction_fence_enable_signaling,
};
-void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr)
+void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
{
spin_lock(&evf_mgr->ev_fence_lock);
- dma_fence_signal(&evf_mgr->ev_fence->base);
+ dma_fence_signal(&ev_fence->base);
spin_unlock(&evf_mgr->ev_fence_lock);
}
dma_resv_add_fence(resv, ef, DMA_RESV_USAGE_BOOKKEEP);
}
spin_unlock(&evf_mgr->ev_fence_lock);
+
return 0;
}
int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
- struct amdgpu_eviction_fence *ev_fence;
-
/* This needs to be done one time per open */
atomic_set(&evf_mgr->ev_fence_seq, 0);
evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
spin_lock_init(&evf_mgr->ev_fence_lock);
- ev_fence = amdgpu_eviction_fence_create(evf_mgr);
- if (!ev_fence) {
- DRM_ERROR("Failed to craete eviction fence\n");
- return -ENOMEM;
- }
-
- spin_lock(&evf_mgr->ev_fence_lock);
- evf_mgr->ev_fence = ev_fence;
- spin_unlock(&evf_mgr->ev_fence_lock);
-
INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
return 0;
}
amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr);
void
-amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr);
+amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
int
amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
}
}
- /* Save the fence to wait for during suspend */
- mutex_lock(&userq_mgr->userq_mutex);
-
/* Retrieve the user queue */
queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
if (!queue) {
r = -ENOENT;
- mutex_unlock(&userq_mgr->userq_mutex);
+ goto put_gobj_write;
}
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
drm_exec_until_all_locked(&exec) {
r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
drm_exec_retry_on_contention(&exec);
- if (r) {
- mutex_unlock(&userq_mgr->userq_mutex);
+ if (r)
goto exec_fini;
- }
r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
drm_exec_retry_on_contention(&exec);
- if (r) {
- mutex_unlock(&userq_mgr->userq_mutex);
+ if (r)
goto exec_fini;
- }
}
r = amdgpu_userq_fence_read_wptr(queue, &wptr);
- if (r) {
- mutex_unlock(&userq_mgr->userq_mutex);
+ if (r)
goto exec_fini;
- }
/* Create a new fence */
r = amdgpu_userq_fence_create(queue, wptr, &fence);
- if (r) {
- mutex_unlock(&userq_mgr->userq_mutex);
+ if (r)
goto exec_fini;
- }
+
+ /* Getting here means the UQ is active; make sure the eviction fence is valid */
+ amdgpu_userqueue_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
dma_fence_put(queue->last_fence);
queue->last_fence = dma_fence_get(fence);
return idr_find(&uq_mgr->userq_idr, qid);
}
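+/*
+ * Make sure this process has a valid eviction fence, flushing or scheduling
+ * the resume work as needed. Returns with uq_mgr->userq_mutex held; the
+ * caller is expected to drop the lock.
+ */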
+void
+amdgpu_userqueue_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+retry:
+ /* Flush any pending resume work to create ev_fence */
+ flush_delayed_work(&uq_mgr->resume_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
+ mutex_unlock(&uq_mgr->userq_mutex);
+ /*
+ * Looks like there was no pending resume work;
+ * schedule one now to create a valid eviction fence
+ */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+ goto retry;
+ }
+}
+
int amdgpu_userqueue_create_object(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_userq_obj *userq_obj,
int size)
return -EINVAL;
}
- mutex_lock(&uq_mgr->userq_mutex);
+ /*
+ * A new queue could be created while the other queues under this UQ_mgr
+ * are suspended, so if there is any resume work pending, wait for it to
+ * finish.
+ *
+ * This also makes sure we have a valid eviction fence ready to be used.
+ */
+ amdgpu_userqueue_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
uq_funcs = adev->userq_funcs[args->in.ip_type];
if (!uq_funcs) {
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
- if (!r) {
- /*
- * There could be a situation that we are creating a new queue while
- * the other queues under this UQ_mgr are suspended. So if there is any
- * resume work pending, wait for it to get done.
- */
- flush_delayed_work(&uq_mgr->resume_work);
- }
return r;
}
}
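+/* Must be called with uq_mgr->userq_mutex held */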
void
-amdgpu_userqueue_suspend(struct amdgpu_userq_mgr *uq_mgr)
+amdgpu_userqueue_suspend(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
{
int ret;
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
-
- mutex_lock(&uq_mgr->userq_mutex);
-
- /* Wait for any pending userqueue fence to signal */
+ /* Wait for any pending userqueue fence work to finish */
ret = amdgpu_userqueue_wait_for_signal(uq_mgr);
if (ret) {
DRM_ERROR("Not suspending userqueue, timeout waiting for work\n");
- goto unlock;
+ return;
}
ret = amdgpu_userqueue_suspend_all(uq_mgr);
if (ret) {
DRM_ERROR("Failed to evict userqueue\n");
- goto unlock;
+ return;
}
/* Signal current eviction fence */
- amdgpu_eviction_fence_signal(evf_mgr);
+ amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
if (evf_mgr->fd_closing) {
- mutex_unlock(&uq_mgr->userq_mutex);
cancel_delayed_work(&uq_mgr->resume_work);
return;
}
/* Schedule a resume work */
schedule_delayed_work(&uq_mgr->resume_work, 0);
-
-unlock:
- mutex_unlock(&uq_mgr->userq_mutex);
}
int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_device *adev)
{
- struct amdgpu_fpriv *fpriv;
-
mutex_init(&userq_mgr->userq_mutex);
idr_init_base(&userq_mgr->userq_idr, 1);
userq_mgr->adev = adev;
- fpriv = uq_mgr_to_fpriv(userq_mgr);
- if (!fpriv->evf_mgr.ev_fence) {
- DRM_ERROR("Eviction fence not initialized yet\n");
- return -EINVAL;
- }
-
INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userqueue_resume_worker);
return 0;
}
#ifndef AMDGPU_USERQUEUE_H_
#define AMDGPU_USERQUEUE_H_
+#include "amdgpu_eviction_fence.h"
#define AMDGPU_MAX_USERQ_COUNT 512
void amdgpu_userqueue_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_userq_obj *userq_obj);
-void amdgpu_userqueue_suspend(struct amdgpu_userq_mgr *uq_mgr);
+void amdgpu_userqueue_suspend(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
int amdgpu_userqueue_active(struct amdgpu_userq_mgr *uq_mgr);
+
+void amdgpu_userqueue_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr);
#endif