drm/amdgpu: move umc ras irq functions to umc block
Author: Tao Zhou <tao.zhou1@amd.com>
Thu, 12 Sep 2019 03:11:25 +0000 (11:11 +0800)
Committer: Alex Deucher <alexander.deucher@amd.com>
Thu, 3 Oct 2019 14:11:01 +0000 (09:11 -0500)
Move the UMC RAS IRQ functions from gmc v9 to the generic umc block;
these functions are specific to UMC and can be shared among all
generations of UMC hardware.

Signed-off-by: Tao Zhou <tao.zhou1@amd.com>
Reviewed-by: Guchun Chen <guchun.chen@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

index c5d8b08af7314ac5a3355b0fd9be99cd8fb89f08..d11b4d38ca1e73f018dfdc82bf18b185b3f3af3e 100644 (file)
@@ -21,7 +21,6 @@
  *
  */
 
-#include "amdgpu.h"
 #include "amdgpu_ras.h"
 
 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, void *ras_ih_info)
@@ -75,3 +74,67 @@ free:
        adev->gmc.umc_ras_if = NULL;
        return r;
 }
+
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+               void *ras_error_status,
+               struct amdgpu_iv_entry *entry)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+       if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+               return AMDGPU_RAS_SUCCESS;
+
+       kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+       if (adev->umc.funcs &&
+           adev->umc.funcs->query_ras_error_count)
+           adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
+
+       if (adev->umc.funcs &&
+           adev->umc.funcs->query_ras_error_address &&
+           adev->umc.max_ras_err_cnt_per_query) {
+               err_data->err_addr =
+                       kcalloc(adev->umc.max_ras_err_cnt_per_query,
+                               sizeof(struct eeprom_table_record), GFP_KERNEL);
+               /* still call query_ras_error_address to clear error status
+                * even NOMEM error is encountered
+                */
+               if(!err_data->err_addr)
+                       DRM_WARN("Failed to alloc memory for umc error address record!\n");
+
+               /* umc query_ras_error_address is also responsible for clearing
+                * error status
+                */
+               adev->umc.funcs->query_ras_error_address(adev, ras_error_status);
+       }
+
+       /* only uncorrectable error needs gpu reset */
+       if (err_data->ue_count) {
+               if (err_data->err_addr_cnt &&
+                   amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+                                               err_data->err_addr_cnt))
+                       DRM_WARN("Failed to add ras bad page!\n");
+
+               amdgpu_ras_reset_gpu(adev, 0);
+       }
+
+       kfree(err_data->err_addr);
+       return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+               struct amdgpu_irq_src *source,
+               struct amdgpu_iv_entry *entry)
+{
+       struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
+       struct ras_dispatch_if ih_data = {
+               .entry = entry,
+       };
+
+       if (!ras_if)
+               return 0;
+
+       ih_data.head = *ras_if;
+
+       amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+       return 0;
+}
index c907b14c9be8553f18d66390ec2beeb758ccf07b..9ac1c2f792998e6a90ea92e9e972167f2b97ec0a 100644 (file)
@@ -82,4 +82,10 @@ struct amdgpu_umc {
 };
 
 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, void *ras_ih_info);
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+               void *ras_error_status,
+               struct amdgpu_iv_entry *entry);
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+               struct amdgpu_irq_src *source,
+               struct amdgpu_iv_entry *entry);
 #endif
index 8910a18547b067d59aa350e0e051b47e8f6d8732..37f180ce4b7769147bd63f2e548b324d30f01a75 100644 (file)
@@ -245,70 +245,6 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
        return 0;
 }
 
-static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
-               void *ras_error_status,
-               struct amdgpu_iv_entry *entry)
-{
-       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
-
-       if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
-               return AMDGPU_RAS_SUCCESS;
-
-       kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-       if (adev->umc.funcs &&
-           adev->umc.funcs->query_ras_error_count)
-           adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
-
-       if (adev->umc.funcs &&
-           adev->umc.funcs->query_ras_error_address &&
-           adev->umc.max_ras_err_cnt_per_query) {
-               err_data->err_addr =
-                       kcalloc(adev->umc.max_ras_err_cnt_per_query,
-                               sizeof(struct eeprom_table_record), GFP_KERNEL);
-               /* still call query_ras_error_address to clear error status
-                * even NOMEM error is encountered
-                */
-               if(!err_data->err_addr)
-                       DRM_WARN("Failed to alloc memory for umc error address record!\n");
-
-               /* umc query_ras_error_address is also responsible for clearing
-                * error status
-                */
-               adev->umc.funcs->query_ras_error_address(adev, err_data);
-       }
-
-       /* only uncorrectable error needs gpu reset */
-       if (err_data->ue_count) {
-               if (err_data->err_addr_cnt &&
-                   amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
-                                               err_data->err_addr_cnt))
-                       DRM_WARN("Failed to add ras bad page!\n");
-
-               amdgpu_ras_reset_gpu(adev, 0);
-       }
-
-       kfree(err_data->err_addr);
-       return AMDGPU_RAS_SUCCESS;
-}
-
-static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
-               struct amdgpu_irq_src *source,
-               struct amdgpu_iv_entry *entry)
-{
-       struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
-       struct ras_dispatch_if ih_data = {
-               .entry = entry,
-       };
-
-       if (!ras_if)
-               return 0;
-
-       ih_data.head = *ras_if;
-
-       amdgpu_ras_interrupt_dispatch(adev, &ih_data);
-       return 0;
-}
-
 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *src,
                                        unsigned type,
@@ -449,7 +385,7 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
 
 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
        .set = gmc_v9_0_ecc_interrupt_state,
-       .process = gmc_v9_0_process_ecc_irq,
+       .process = amdgpu_umc_process_ecc_irq,
 };
 
 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
@@ -805,7 +741,7 @@ static int gmc_v9_0_ecc_late_init(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct ras_ih_if umc_ih_info = {
-               .cb = gmc_v9_0_process_ras_data_cb,
+               .cb = amdgpu_umc_process_ras_data_cb,
        };
 
        if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {