drm/amdgpu: Fix display freeze lockup error
author Arvind Yadav <Arvind.Yadav@amd.com>
Mon, 27 Jan 2025 12:52:01 +0000 (18:22 +0530)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 8 Apr 2025 20:48:20 +0000 (16:48 -0400)
A deadlock has arisen between the userq signal ioctl and the
eviction fence. In this scenario, amdgpu_userq_signal_ioctl() has
acquired the reservation lock on the read/write buffer objects (BOs)
through drm_exec. It then calls amdgpu_userqueue_ensure_ev_fence(),
which waits for the userq resume work to complete.
Meanwhile, the userq suspend worker has initiated the userq resume
work (amdgpu_userqueue_resume_worker). This resume work tries to
validate the vm->done BOs, so amdgpu_userqueue_validate_bos also
attempts to take the reservation lock on the same write BO that is
already held by amdgpu_userq_signal_ioctl.
As a result, the resume work stalls and
amdgpu_userqueue_ensure_ev_fence remains blocked waiting for it.

Fix this by reordering amdgpu_userq_signal_ioctl() so that the userq
fence is allocated, the eviction fence is ensured and the new fence
is created before drm_exec takes the BO reservations, and by
dropping the userq_mutex before entering the drm_exec loop.
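The wait cycle can be modelled outside the driver with a minimal
user-space C sketch using pthreads (illustrative only: bo_resv,
resume_work() and flush_resume_work() are hypothetical stand-ins for
the BO reservation lock, amdgpu_userqueue_resume_worker and
flush_delayed_work, not driver code). It runs the fixed ordering;
swapping the flush and the lock in main() reproduces the hang below.

  /* Build with: cc -pthread deadlock_sketch.c */
  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  static pthread_mutex_t bo_resv = PTHREAD_MUTEX_INITIALIZER; /* BO reservation */
  static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
  static bool work_done;

  /* Stand-in for amdgpu_userqueue_resume_worker(): needs the BO reservation. */
  static void *resume_work(void *arg)
  {
          (void)arg;
          pthread_mutex_lock(&bo_resv);   /* blocks if the ioctl already holds it */
          /* ... validate vm->done BOs ... */
          pthread_mutex_unlock(&bo_resv);

          pthread_mutex_lock(&done_lock);
          work_done = true;
          pthread_cond_signal(&done_cv);
          pthread_mutex_unlock(&done_lock);
          return NULL;
  }

  /* Stand-in for flush_delayed_work() in amdgpu_userqueue_ensure_ev_fence(). */
  static void flush_resume_work(void)
  {
          pthread_mutex_lock(&done_lock);
          while (!work_done)
                  pthread_cond_wait(&done_cv, &done_lock);
          pthread_mutex_unlock(&done_lock);
  }

  int main(void)
  {
          pthread_t worker;

          pthread_create(&worker, NULL, resume_work, NULL);

          /*
           * Fixed ordering: flush the resume work (ensure_ev_fence) before
           * taking the BO reservation via drm_exec.  Swapping the two steps
           * below recreates the reported deadlock: the flush waits for the
           * worker while the worker waits for bo_resv.
           */
          flush_resume_work();
          pthread_mutex_lock(&bo_resv);
          /* ... signal ioctl work under the reservation ... */
          pthread_mutex_unlock(&bo_resv);

          pthread_join(worker, NULL);
          printf("signal ioctl and resume work completed without deadlock\n");
          return 0;
  }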

Call Trace:
[  242.836469] INFO: task gnome-shel:cs0:1288 blocked for more than 120 seconds.
[  242.836486]       Tainted: G           OE      6.12.0-rc2rebased-oct-24+ #4
[  242.836491] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[  242.836494] task:gnome-shel:cs0  state:D stack:0     pid:1288  tgid:1282  ppid:1180   flags:0x00000002
[  242.836503] Call Trace:
[  242.836508]  <TASK>
[  242.836517]  __schedule+0x3e0/0xb10
[  242.836530]  ? srso_return_thunk+0x5/0x5f
[  242.836541]  schedule+0x31/0x120
[  242.836546]  schedule_timeout+0x150/0x160
[  242.836551]  ? srso_return_thunk+0x5/0x5f
[  242.836555]  ? sysvec_call_function+0x69/0xd0
[  242.836562]  ? srso_return_thunk+0x5/0x5f
[  242.836567]  ? preempt_count_add+0x7f/0xd0
[  242.836577]  __wait_for_common+0x91/0x180
[  242.836582]  ? __pfx_schedule_timeout+0x10/0x10
[  242.836590]  wait_for_completion+0x28/0x30
[  242.836595]  __flush_work+0x16c/0x290
[  242.836602]  ? __pfx_wq_barrier_func+0x10/0x10
[  242.836611]  flush_delayed_work+0x3a/0x60
[  242.836621]  amdgpu_userqueue_ensure_ev_fence+0x2d/0xb0 [amdgpu]
[  242.836966]  amdgpu_userq_signal_ioctl+0x959/0xec0 [amdgpu]
[  242.837171]  ? __pfx_amdgpu_userq_signal_ioctl+0x10/0x10 [amdgpu]
[  242.837365]  drm_ioctl_kernel+0xae/0x100 [drm]
[  242.837398]  drm_ioctl+0x2a1/0x500 [drm]
[  242.837420]  ? __pfx_amdgpu_userq_signal_ioctl+0x10/0x10 [amdgpu]
[  242.837622]  ? srso_return_thunk+0x5/0x5f
[  242.837627]  ? srso_return_thunk+0x5/0x5f
[  242.837630]  ? _raw_spin_unlock_irqrestore+0x2b/0x50
[  242.837635]  amdgpu_drm_ioctl+0x4f/0x90 [amdgpu]
[  242.837811]  __x64_sys_ioctl+0x99/0xd0
[  242.837820]  x64_sys_call+0x1209/0x20d0
[  242.837825]  do_syscall_64+0x51/0x120
[  242.837830]  entry_SYSCALL_64_after_hwframe+0x76/0x7e
[  242.837835] RIP: 0033:0x7f2f33f1a94f
[  242.837838] RSP: 002b:00007f2f24ffea30 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[  242.837842] RAX: ffffffffffffffda RBX: 00007f2f24ffebd0 RCX: 00007f2f33f1a94f
[  242.837845] RDX: 00007f2f24ffebd0 RSI: 00000000c0306457 RDI: 000000000000000d
[  242.837847] RBP: 00007f2f24ffeab0 R08: 0000000000000000 R09: 0000000000000000
[  242.837849] R10: 00007f2f24ffecd0 R11: 0000000000000246 R12: 00007f2f25000640
[  242.837851] R13: 00000000c0306457 R14: 000000000000000d R15: 00007fff3b39c1e0
[  242.837858]  </TASK>
[  242.837865] INFO: task Xwayland:cs0:1517 blocked for more than 120 seconds.
[  242.837869]       Tainted: G           OE      6.12.0-rc2rebased-oct-24+ #4
[  242.837872] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[  242.837874] task:Xwayland:cs0    state:D stack:0     pid:1517  tgid:1338  ppid:1282   flags:0x00004002
[  242.837878] Call Trace:
[  242.837880]  <TASK>
[  242.837883]  __schedule+0x3e0/0xb10
[  242.837890]  schedule+0x31/0x120
[  242.837894]  schedule_preempt_disabled+0x1c/0x30
[  242.837897]  __mutex_lock.constprop.0+0x386/0x6e0
[  242.837902]  ? srso_return_thunk+0x5/0x5f
[  242.837905]  ? __timer_delete_sync+0x81/0xe0
[  242.837911]  __mutex_lock_slowpath+0x13/0x20
[  242.837915]  mutex_lock+0x3b/0x50
[  242.837919]  amdgpu_userqueue_ensure_ev_fence+0x35/0xb0 [amdgpu]
[  242.838138]  amdgpu_userq_signal_ioctl+0x959/0xec0 [amdgpu]
[  242.838340]  ? __pfx_amdgpu_userq_signal_ioctl+0x10/0x10 [amdgpu]
[  242.838531]  drm_ioctl_kernel+0xae/0x100 [drm]
[  242.838559]  drm_ioctl+0x2a1/0x500 [drm]
[  242.838580]  ? __pfx_amdgpu_userq_signal_ioctl+0x10/0x10 [amdgpu]
[  242.838778]  ? srso_return_thunk+0x5/0x5f
[  242.838783]  ? srso_return_thunk+0x5/0x5f
[  242.838786]  ? _raw_spin_unlock_irqrestore+0x2b/0x50
[  242.838791]  amdgpu_drm_ioctl+0x4f/0x90 [amdgpu]
[  242.838967]  __x64_sys_ioctl+0x99/0xd0
[  242.838972]  x64_sys_call+0x1209/0x20d0
[  242.838975]  do_syscall_64+0x51/0x120
[  242.838979]  entry_SYSCALL_64_after_hwframe+0x76/0x7e
[  242.838982] RIP: 0033:0x7f9118b1a94f
[  242.838985] RSP: 002b:00007f910cdff760 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[  242.838989] RAX: ffffffffffffffda RBX: 00007f910cdff910 RCX: 00007f9118b1a94f
[  242.838991] RDX: 00007f910cdff910 RSI: 00000000c0306457 RDI: 000000000000000c
[  242.838993] RBP: 00007f910cdff7e0 R08: 0000000000000000 R09: 0000000000000001
[  242.838995] R10: 00007f910cdff9d4 R11: 0000000000000246 R12: 00007f910ce00640
[  242.838997] R13: 00000000c0306457 R14: 000000000000000c R15: 00007fff9dd11d10
[  242.839004]  </TASK>

v2: Addressed review comments from Christian.
v3/v4: Addressed review comments from Christian.
   - Move the drm_exec loop after the userq fence creation.
   - Clean up the newly created userq fence in case of error.
v5: Addressed review comments from Christian.
   - Create a new amdgpu_userq_fence_alloc() function for the allocation.
   - Call dma_fence_put() in the cleanup path.
   - Make the amdgpu_userq_fence_create() function static.
   - Call drm_exec_init() after mutex_unlock().

Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Shashank Sharma <shashank.sharma@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Arvind Yadav <arvind.yadav@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index 567a5ffa776512dc045e3dfe1788892170244154..a4953d668972a151a44d72df997281c4fce7d423 100644
@@ -197,11 +197,18 @@ void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
        kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
 }
 
-int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
-                             u64 seq, struct dma_fence **f)
+#ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ
+static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
+{
+       *userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
+       return *userq_fence ? 0 : -ENOMEM;
+}
+
+static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
+                                    struct amdgpu_userq_fence *userq_fence,
+                                    u64 seq, struct dma_fence **f)
 {
        struct amdgpu_userq_fence_driver *fence_drv;
-       struct amdgpu_userq_fence *userq_fence;
        struct dma_fence *fence;
        unsigned long flags;
 
@@ -209,10 +216,6 @@ int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
        if (!fence_drv)
                return -EINVAL;
 
-       userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
-       if (!userq_fence)
-               return -ENOMEM;
-
        spin_lock_init(&userq_fence->lock);
        INIT_LIST_HEAD(&userq_fence->link);
        fence = &userq_fence->base;
@@ -266,6 +269,7 @@ int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
 
        return 0;
 }
+#endif
 
 static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
 {
@@ -383,6 +387,11 @@ map_error:
        return r;
 }
 
+static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
+{
+       dma_fence_put(fence);
+}
+
 int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
 {
@@ -392,6 +401,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
        struct drm_gem_object **gobj_write = NULL;
        struct drm_gem_object **gobj_read = NULL;
        struct amdgpu_usermode_queue *queue;
+       struct amdgpu_userq_fence *userq_fence;
        struct drm_syncobj **syncobj = NULL;
        u32 *bo_handles_write, num_write_bo_handles;
        u32 *syncobj_handles, num_syncobj_handles;
@@ -475,6 +485,29 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
                goto put_gobj_write;
        }
 
+       r = amdgpu_userq_fence_read_wptr(queue, &wptr);
+       if (r)
+               goto put_gobj_write;
+
+       r = amdgpu_userq_fence_alloc(&userq_fence);
+       if (r)
+               goto put_gobj_write;
+
+       /* We are here means UQ is active, make sure the eviction fence is valid */
+       amdgpu_userqueue_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+       /* Create a new fence */
+       r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
+       if (r) {
+               mutex_unlock(&userq_mgr->userq_mutex);
+               kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+               goto put_gobj_write;
+       }
+
+       dma_fence_put(queue->last_fence);
+       queue->last_fence = dma_fence_get(fence);
+       mutex_unlock(&userq_mgr->userq_mutex);
+
        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
                      (num_read_bo_handles + num_write_bo_handles));
 
@@ -482,31 +515,19 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
        drm_exec_until_all_locked(&exec) {
                r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
                drm_exec_retry_on_contention(&exec);
-               if (r)
+               if (r) {
+                       amdgpu_userq_fence_cleanup(fence);
                        goto exec_fini;
+               }
 
                r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
                drm_exec_retry_on_contention(&exec);
-               if (r)
+               if (r) {
+                       amdgpu_userq_fence_cleanup(fence);
                        goto exec_fini;
+               }
        }
 
-       r = amdgpu_userq_fence_read_wptr(queue, &wptr);
-       if (r)
-               goto exec_fini;
-
-       /* Create a new fence */
-       r = amdgpu_userq_fence_create(queue, wptr, &fence);
-       if (r)
-               goto exec_fini;
-
-       /* We are here means UQ is active, make sure the eviction fence is valid */
-       amdgpu_userqueue_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
-
-       dma_fence_put(queue->last_fence);
-       queue->last_fence = dma_fence_get(fence);
-       mutex_unlock(&userq_mgr->userq_mutex);
-
        for (i = 0; i < num_read_bo_handles; i++) {
                if (!gobj_read || !gobj_read[i]->resv)
                        continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
index f1a90840ac1fd503eae6c4b8b5f5a53910000b56..f0a91cc028808a31a1713d3d504ef2c936a1804a 100644
@@ -61,8 +61,7 @@ struct amdgpu_userq_fence_driver {
 
 int amdgpu_userq_fence_slab_init(void);
 void amdgpu_userq_fence_slab_fini(void);
-int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
-                             u64 seq, struct dma_fence **f);
+
 void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv);
 void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv);
 int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
index 2eac83ba8bdf7628d0ce878e83002cc355ff34e4..f1d4e29772a53d34daf4fb1f5242438e3359f5c5 100644
@@ -455,7 +455,7 @@ amdgpu_userqueue_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
        bool clear, unlock;
        int ret = 0;
 
-       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
        drm_exec_until_all_locked(&exec) {
                ret = amdgpu_vm_lock_pd(vm, &exec, 2);
                drm_exec_retry_on_contention(&exec);