drm/amdgpu/gfx9.4.3: implement reset_hw_queue for gfx9.4.3
author Jiadong Zhu <Jiadong.Zhu@amd.com>
Thu, 4 Jul 2024 06:51:58 +0000 (14:51 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 16 Aug 2024 18:18:30 +0000 (14:18 -0400)
Use MMIO to perform the queue reset. Enter safe mode
before writing the MMIO registers.

v2: set register instance offset according to xcc id.

Acked-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
Signed-off-by: Jiadong Zhu <Jiadong.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c

index 9a740020243d437bc91e1d6b94d61677e31c8dc2..18cb6d45d54f29e25634bedd654611cdaa5ea3bb 100644 (file)
@@ -200,6 +200,8 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
                                struct amdgpu_cu_info *cu_info);
+static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
+static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
 
 static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
                                uint64_t queue_mask)
@@ -311,12 +313,46 @@ static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
                        PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
 }
 
+static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
+                                         uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
+                                         uint32_t xcc_id, uint32_t vmid)
+{
+       struct amdgpu_device *adev = kiq_ring->adev;
+       unsigned i;
+
+       /* enter safe mode */
+       gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
+       mutex_lock(&adev->srbm_mutex);
+       soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);
+
+       if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+               WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
+               WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
+               /* wait until the dequeue request takes effect */
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
+                               break;
+                       udelay(1);
+               }
+               if (i >= adev->usec_timeout)
+                       dev_err(adev->dev, "fail to wait on hqd deactive\n");
+       } else {
+               dev_err(adev->dev, "reset queue_type(%d) not supported\n\n", queue_type);
+       }
+
+       soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+       mutex_unlock(&adev->srbm_mutex);
+       /* exit safe mode */
+       gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
+}
+
 static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
        .kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
        .kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
        .kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
        .kiq_query_status = gfx_v9_4_3_kiq_query_status,
        .kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
+       .kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
        .set_resources_size = 8,
        .map_queues_size = 7,
        .unmap_queues_size = 6,
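
For context, a minimal caller-side sketch of how the kiq_reset_hw_queue callback added above could be reached for a hung compute ring. The helper name example_reset_compute_queue and the exact call site are assumptions for illustration only and are not part of this patch; the kiq/pmf/ring field names follow the existing amdgpu driver structures.

/*
 * Illustrative sketch, not part of this patch: invoke the per-XCC KIQ
 * reset callback installed in gfx_v9_4_3_kiq_pm4_funcs for a compute ring.
 */
static void example_reset_compute_queue(struct amdgpu_device *adev,
                                        struct amdgpu_ring *ring,
                                        uint32_t vmid)
{
        /* pick the KIQ instance that belongs to the ring's XCC */
        struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];

        if (kiq->pmf && kiq->pmf->kiq_reset_hw_queue)
                kiq->pmf->kiq_reset_hw_queue(&kiq->ring,
                                             AMDGPU_RING_TYPE_COMPUTE,
                                             ring->me, ring->pipe,
                                             ring->queue,
                                             ring->xcc_id, vmid);
}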