drm/amdgpu: move amdgpu_ctx_mgr_entity_fini to f_ops flush hook (V4)
authorAndrey Grodzovsky <andrey.grodzovsky@amd.com>
Wed, 30 May 2018 19:28:52 +0000 (15:28 -0400)
committerAlex Deucher <alexander.deucher@amd.com>
Fri, 15 Jun 2018 17:20:33 +0000 (12:20 -0500)
With this we can now terminate jobs enqueued into the SW queue the moment
the task is being killed, instead of waiting for the last user of the
drm file to release it.

Also stop checking for kref_read(&ctx->refcount) == 1 when
calling drm_sched_entity_do_release, since another task
might still hold a reference to this entity; we don't
care, because KILL means terminating job submission regardless
of what other tasks are doing.

v2:
Use returned remaining timeout as parameter for the next call.
Rebase.

v3:
Switch to working with jiffies.
Streamline remainder TO usage.
Rebase.

v4:
Rebase.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

index c5bb36275e9379e2ecf36318233b7fc98f44483a..64b3a1ed04dcacd1af40fc7db15961d6fc57af93 100644 (file)
@@ -449,26 +449,28 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;
+       long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
 
        idp = &mgr->ctx_handles;
 
+       mutex_lock(&mgr->lock);
        idr_for_each_entry(idp, ctx, id) {
 
-               if (!ctx->adev)
+               if (!ctx->adev) {
+                       mutex_unlock(&mgr->lock);
                        return;
+               }
 
                for (i = 0; i < ctx->adev->num_rings; i++) {
 
                        if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                                continue;
 
-                       if (kref_read(&ctx->refcount) == 1)
-                               drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
-                                                 &ctx->rings[i].entity);
-                       else
-                               DRM_ERROR("ctx %p is still alive\n", ctx);
+                       max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+                                         &ctx->rings[i].entity, max_wait);
                }
        }
+       mutex_unlock(&mgr->lock);
 }
 
 void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
index b0bf2f24da48fb5489a13793c4a819bbef32ae2f..a549483032b0561955e1eff8b5cb38c5a4195ddc 100644 (file)
@@ -855,9 +855,21 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
        .runtime_idle = amdgpu_pmops_runtime_idle,
 };
 
+static int amdgpu_flush(struct file *f, fl_owner_t id)
+{
+       struct drm_file *file_priv = f->private_data;
+       struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+       amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
+
+       return 0;
+}
+
+
 static const struct file_operations amdgpu_driver_kms_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
+       .flush = amdgpu_flush,
        .release = drm_release,
        .unlocked_ioctl = amdgpu_drm_ioctl,
        .mmap = amdgpu_mmap,
index 91517b166a3b8f504930586a97f173eadc7b47f6..c472bb53e41df98cebb24cfab56affdb32d393ff 100644 (file)
@@ -930,7 +930,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
                return;
 
        pm_runtime_get_sync(dev->dev);
-       amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
 
        if (adev->asic_type != CHIP_RAVEN) {
                amdgpu_uvd_free_handles(adev, file_priv);