drm/amdgpu: Register aqua vanjaram vcn poison irq
author: Stanley.Yang <Stanley.Yang@amd.com>
Tue, 13 May 2025 11:46:08 +0000 (19:46 +0800)
committer: Alex Deucher <alexander.deucher@amd.com>
Thu, 22 May 2025 16:01:24 +0000 (12:01 -0400)
Register the aqua vanjaram VCN poison interrupt source and add VCN poison handling.

Signed-off-by: Stanley.Yang <Stanley.Yang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h

index 712e1fba33ce6c7b28621889fbdae159aef5a127..764b3ff09f1ee9a6f0196dd028ffe33b65b1538f 100644 (file)
@@ -169,6 +169,10 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
        if (r)
                return r;
 
+       /* VCN POISON TRAP */
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+               VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
+
        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 
                r = amdgpu_vcn_sw_init(adev, i);
@@ -387,6 +391,9 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
                        vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
        }
 
+       if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+               amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
+
        return 0;
 }
 
@@ -1814,11 +1821,24 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
        return 0;
 }
 
+/*
+ * .set callback for the VCN RAS poison interrupt source.
+ *
+ * Intentionally a no-op: the driver does not program any enable/mask state
+ * for this source — presumably the poison interrupt is always enabled by
+ * hardware/firmware (NOTE(review): confirm against the IH programming model).
+ * Always returns 0 (success).
+ */
+static int vcn_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned int type,
+                                       enum amdgpu_interrupt_state state)
+{
+       return 0;
+}
+
 static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
        .set = vcn_v4_0_3_set_interrupt_state,
        .process = vcn_v4_0_3_process_interrupt,
 };
 
+/*
+ * Callbacks for the ras_poison_irq source: state changes are a no-op (see
+ * vcn_v4_0_3_set_ras_interrupt_state), delivery is handled by the common
+ * VCN poison-interrupt processor.
+ */
+static const struct amdgpu_irq_src_funcs vcn_v4_0_3_ras_irq_funcs = {
+       .set = vcn_v4_0_3_set_ras_interrupt_state,
+       .process = amdgpu_vcn_process_poison_irq,
+};
+
 /**
  * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
  *
@@ -1834,6 +1854,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
                adev->vcn.inst->irq.num_types++;
        }
        adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
+
+       adev->vcn.inst->ras_poison_irq.num_types = 1;
+       adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
 }
 
 static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -1981,9 +2004,44 @@ static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
                vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
 }
 
+/*
+ * Query the RAS poison status of one sub-block on one VCN instance.
+ *
+ * For AMDGPU_VCN_V4_0_3_VCPU_VCODEC this reads the instance's
+ * UVD_RAS_VCPU_VCODEC_STATUS register and extracts the POISONED_PF field;
+ * any other sub-block value reports 0 (no poison). A non-zero result is
+ * logged via dev_info before being returned.
+ */
+static uint32_t vcn_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
+                       uint32_t instance, uint32_t sub_block)
+{
+       uint32_t poison_stat = 0, reg_value = 0;
+
+       switch (sub_block) {
+       case AMDGPU_VCN_V4_0_3_VCPU_VCODEC:
+               reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
+               poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
+               break;
+       default:
+               /* Unknown sub-block: nothing to read, report no poison. */
+               break;
+       }
+
+       if (poison_stat)
+               dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
+                       instance, sub_block);
+
+       return poison_stat;
+}
+
+/*
+ * Scan every VCN instance and every known sub-block for poison.
+ *
+ * Returns true if at least one instance/sub-block pair reported a non-zero
+ * poison status, false otherwise.
+ */
+static bool vcn_v4_0_3_query_poison_status(struct amdgpu_device *adev)
+{
+       uint32_t inst, sub;
+       uint32_t poison_stat = 0;
+
+       /* Accumulate per-instance results; any non-zero hit makes this true. */
+       for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+               for (sub = 0; sub < AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK; sub++)
+                       poison_stat +=
+                       vcn_v4_0_3_query_poison_by_instance(adev, inst, sub);
+
+       return !!poison_stat;
+}
+
 static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
        .query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
        .reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
+       .query_poison_status = vcn_v4_0_3_query_poison_status,
 };
 
 static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
@@ -2059,6 +2117,13 @@ static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_commo
        if (r)
                return r;
 
+       if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+               adev->vcn.inst->ras_poison_irq.funcs) {
+               r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+               if (r)
+                       goto late_fini;
+       }
+
        r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
                                &vcn_v4_0_3_aca_info, NULL);
        if (r)
index 03572a1d0c9cb751b631c81f8587fc04cdf3db5f..aeab89853a923aedeb04c235108310f2cbb9933d 100644 (file)
 #ifndef __VCN_V4_0_3_H__
 #define __VCN_V4_0_3_H__
 
+/*
+ * VCN 4.0.3 sub-blocks that can report RAS poison status.
+ * MAX_SUB_BLOCK is a count sentinel used as the iteration bound.
+ */
+enum amdgpu_vcn_v4_0_3_sub_block {
+       AMDGPU_VCN_V4_0_3_VCPU_VCODEC = 0,
+
+       AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK,
+};
+
 extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
 
 void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,