drm/amdgpu/gfx_9.4.3: wait for reset done before remap
authorJiadong Zhu <Jiadong.Zhu@amd.com>
Fri, 28 Jun 2024 03:48:22 +0000 (11:48 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Fri, 16 Aug 2024 18:18:22 +0000 (14:18 -0400)
There is a race condition in which CP firmware modifies
the MQD during the reset sequence after the driver has
updated it for remapping. We have to wait until CP_HQD_ACTIVE
becomes false before remapping the queue.

v2: fix KIQ locking (Alex)
v3: fix KIQ locking harder

Acked-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
Signed-off-by: Jiadong Zhu <Jiadong.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c

index 44c6e2d44722010d55e826532b781d6b1237c4c7..9a740020243d437bc91e1d6b94d61677e31c8dc2 100644 (file)
@@ -3433,7 +3433,7 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
        struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        unsigned long flags;
-       int r;
+       int r, i;
 
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
@@ -3455,9 +3455,28 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
        if (r)
                return r;
 
+       /* make sure dequeue is complete*/
+       gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
+       mutex_lock(&adev->srbm_mutex);
+       soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, ring->xcc_id));
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+                       break;
+               udelay(1);
+       }
+       if (i >= adev->usec_timeout)
+               r = -ETIMEDOUT;
+       soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, ring->xcc_id));
+       mutex_unlock(&adev->srbm_mutex);
+       gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
+       if (r) {
+               dev_err(adev->dev, "fail to wait on hqd deactive\n");
+               return r;
+       }
+
        r = amdgpu_bo_reserve(ring->mqd_obj, false);
        if (unlikely(r != 0)){
-               DRM_ERROR("fail to resv mqd_obj\n");
+               dev_err(adev->dev, "fail to resv mqd_obj\n");
                return r;
        }
        r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
@@ -3467,15 +3486,23 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
                ring->mqd_ptr = NULL;
        }
        amdgpu_bo_unreserve(ring->mqd_obj);
-       if (r){
-               DRM_ERROR("fail to unresv mqd_obj\n");
+       if (r) {
+               dev_err(adev->dev, "fail to unresv mqd_obj\n");
                return r;
        }
+       spin_lock_irqsave(&kiq->ring_lock, flags);
        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
+       if (r) {
+               spin_unlock_irqrestore(&kiq->ring_lock, flags);
+               return -ENOMEM;
+       }
        kiq->pmf->kiq_map_queues(kiq_ring, ring);
+       amdgpu_ring_commit(kiq_ring);
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
        r = amdgpu_ring_test_ring(kiq_ring);
-       if (r){
-               DRM_ERROR("fail to remap queue\n");
+       if (r) {
+               dev_err(adev->dev, "fail to remap queue\n");
                return r;
        }
        return amdgpu_ring_test_ring(ring);