drm/amdgpu: replace reset_error_count with amdgpu_ras_reset_error_count
authorTao Zhou <tao.zhou1@amd.com>
Wed, 18 Oct 2023 09:57:25 +0000 (17:57 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Fri, 20 Oct 2023 19:11:28 +0000 (15:11 -0400)
Simplify the code: replace the open-coded NULL checks of ras_block.hw_ops->reset_ras_error_count with the common helper amdgpu_ras_reset_error_count(), which performs the same checks internally.

Signed-off-by: Tao Zhou <tao.zhou1@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c

index 0f98f720d9ca6fbbc3e5bab5e01ac8fd0f7f2490..2d385476939b61531a8371d5100c40b571903253 100644 (file)
@@ -3578,9 +3578,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
                if (adev->asic_reset_res)
                        goto fail;
 
-               if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
-                   adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
-                       adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
+               amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
        } else {
 
                task_barrier_full(&hive->tb);
@@ -5201,9 +5199,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 
        if (!r && amdgpu_ras_intr_triggered()) {
                list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-                       if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
-                           tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
-                               tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
+                       amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
                }
 
                amdgpu_ras_intr_cleared();
index 19aa6b7b16fba44ebdde782c5f196fe01b7b06e1..9d5d742ee9d366b0a9c068ae7ca842717bbf1e48 100644 (file)
@@ -908,7 +908,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm
            adev->gmc.xgmi.num_physical_nodes == 0)
                return 0;
 
-       adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);
+       amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);
 
        return amdgpu_ras_block_late_init(adev, ras_block);
 }
@@ -1075,7 +1075,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                break;
        }
 
-       adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);
+       amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);
 
        err_data->ue_count += ue_cnt;
        err_data->ce_count += ce_cnt;
index 3a1050344b59fb9d8eda7de90173ca76c9704cad..5fed01e34928253a6b58a6ca5785900d41e710cf 100644 (file)
@@ -1587,13 +1587,8 @@ static int gmc_v9_0_late_init(void *handle)
        }
 
        if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
-               if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
-                   adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
-                       adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
-
-               if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
-                   adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
-                       adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
+               amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
+               amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
        }
 
        r = amdgpu_gmc_ras_late_init(adev);
index dff66e1ae7eacc8afccc3c0b29fdeded10521d04..683d51ae4bf10c17c8aeb6e720f06a89d036fc8a 100644 (file)
@@ -1749,11 +1749,8 @@ static int sdma_v4_0_late_init(void *handle)
 
        sdma_v4_0_setup_ulv(adev);
 
-       if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
-               if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
-                   adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
-                       adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
-       }
+       if (!amdgpu_persistent_edc_harvesting_supported(adev))
+               amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
 
        return 0;
 }
index 31aa245552d6f1ad86cc8ea840635df619ba8eb0..c46bc6aa4f48f8d61a7ce28b1fd3c89a55c761cf 100644 (file)
@@ -1276,11 +1276,8 @@ static int sdma_v4_4_2_late_init(void *handle)
                .cb = sdma_v4_4_2_process_ras_data_cb,
        };
 #endif
-       if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
-               if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
-                   adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
-                       adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
-       }
+       if (!amdgpu_persistent_edc_harvesting_supported(adev))
+               amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
 
        return 0;
 }