drm/amdgpu/vcn4.0.3: convert internal functions to use vcn_inst
author Alex Deucher <alexander.deucher@amd.com>
Fri, 22 Nov 2024 22:01:49 +0000 (17:01 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 27 Feb 2025 20:52:29 +0000 (15:52 -0500)
Pass the vcn instance structure to these functions rather
than adev and the instance number.
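As a minimal sketch of the pattern (with simplified, illustrative stand-ins
for the real amdgpu types; the actual definitions live in amdgpu_vcn.h):

    /* Illustrative stand-ins for the real amdgpu structures. */
    struct amdgpu_device;

    struct amdgpu_vcn_inst {
            struct amdgpu_device *adev;     /* back-pointer to the device */
            int inst;                       /* instance number */
    };

    /* Old style: callers pass the device plus an instance index. */
    static void example_old(struct amdgpu_device *adev, int inst_idx);

    /* New style: callers pass the per-instance structure and the
     * function derives adev and inst_idx from it locally.
     */
    static void example_new(struct amdgpu_vcn_inst *vinst)
    {
            struct amdgpu_device *adev = vinst->adev;
            int inst_idx = vinst->inst;

            /* ... program this instance using adev and inst_idx ... */
    }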

TODO: clean up the function internals to use the vinst state
directly rather than accessing it indirectly via adev->vcn.inst[].
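A rough before/after fragment of that follow-up (not a complete function,
just the access pattern, using the fw_shared pointer touched below):

    /* today: per-instance state reached indirectly through adev,
     * even though vinst already points at that same array element
     */
    fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;

    /* intended cleanup: reach the state directly through vinst */
    fw_shared = vinst->fw_shared.cpu_addr;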

Reviewed-by: Boyuan Zhang <Boyuan.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c

index a0b848ad097bd2f45269eec0be383105756bff23..9e1c25210f880bc384f978143cbd8de7e5046540 100644
@@ -431,13 +431,14 @@ static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
 /**
  * vcn_v4_0_3_mc_resume - memory controller programming
  *
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
  *
  * Let the VCN memory controller know it's offsets
  */
-static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_mc_resume(struct amdgpu_vcn_inst *vinst)
 {
+       struct amdgpu_device *adev = vinst->adev;
+       int inst_idx = vinst->inst;
        uint32_t offset, size, vcn_inst;
        const struct common_firmware_header *hdr;
 
@@ -505,14 +506,16 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
 /**
  * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
  *
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
  * @indirect: indirectly write sram
  *
  * Let the VCN memory controller know it's offsets with dpg mode
  */
-static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+                                         bool indirect)
 {
+       struct amdgpu_device *adev = vinst->adev;
+       int inst_idx = vinst->inst;
        uint32_t offset, size;
        const struct common_firmware_header *hdr;
 
@@ -619,13 +622,14 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
 /**
  * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
  *
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
  *
  * Disable clock gating for VCN block
  */
-static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
 {
+       struct amdgpu_device *adev = vinst->adev;
+       int inst_idx = vinst->inst;
        uint32_t data;
        int vcn_inst;
 
@@ -712,16 +716,18 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst
 /**
  * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
  *
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
  * @sram_sel: sram select
- * @inst_idx: instance number index
  * @indirect: indirectly write sram
  *
  * Disable clock gating for VCN block with dpg mode
  */
-static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
-                               int inst_idx, uint8_t indirect)
+static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+                                                    uint8_t sram_sel,
+                                                    uint8_t indirect)
 {
+       struct amdgpu_device *adev = vinst->adev;
+       int inst_idx = vinst->inst;
        uint32_t reg_data = 0;
 
        if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -763,13 +769,14 @@ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev,
 /**
  * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
  *
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number
+ * @vinst: VCN instance
  *
  * Enable clock gating for VCN block
  */
-static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
 {
+       struct amdgpu_device *adev = vinst->adev;
+       int inst_idx = vinst->inst;
        uint32_t data;
        int vcn_inst;
 
@@ -814,14 +821,16 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_
 /**
  * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
  *
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
  * @indirect: indirectly write sram
  *
  * Start VCN block with dpg mode
  */
-static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+                                    bool indirect)
 {
+       struct amdgpu_device *adev = vinst->adev;
+       int inst_idx = vinst->inst;
        volatile struct amdgpu_vcn4_fw_shared *fw_shared =
                                                adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
        struct amdgpu_ring *ring;
@@ -849,7 +858,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
        }
 
        /* enable clock gating */
-       vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+       vcn_v4_0_3_disable_clock_gating_dpg_mode(vinst, 0, indirect);
 
        /* enable VCPU clock */
        tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -899,7 +908,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
                 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
 
-       vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);
+       vcn_v4_0_3_mc_resume_dpg_mode(vinst, indirect);
 
        tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
        tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -1146,20 +1155,21 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
 /**
  * vcn_v4_0_3_start - VCN start
  *
- * @adev: amdgpu_device pointer
- * @i: instance to start
+ * @vinst: VCN instance
  *
  * Start VCN block
  */
-static int vcn_v4_0_3_start(struct amdgpu_device *adev, int i)
+static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst)
 {
+       struct amdgpu_device *adev = vinst->adev;
+       int i = vinst->inst;
        volatile struct amdgpu_vcn4_fw_shared *fw_shared;
        struct amdgpu_ring *ring;
        int j, k, r, vcn_inst;
        uint32_t tmp;
 
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-               return vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.inst[i].indirect_sram);
+               return vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
 
        vcn_inst = GET_INST(VCN, i);
        /* set VCN status busy */
@@ -1168,7 +1178,7 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev, int i)
        WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
 
        /* SW clock gating */
-       vcn_v4_0_3_disable_clock_gating(adev, i);
+       vcn_v4_0_3_disable_clock_gating(vinst);
 
        /* enable VCPU clock */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
@@ -1222,7 +1232,7 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev, int i)
                      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
                      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
 
-       vcn_v4_0_3_mc_resume(adev, i);
+       vcn_v4_0_3_mc_resume(vinst);
 
        /* VCN global tiling registers */
        WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
@@ -1316,13 +1326,14 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev, int i)
 /**
  * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
  *
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
  *
  * Stop VCN block with dpg mode
  */
-static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
 {
+       struct amdgpu_device *adev = vinst->adev;
+       int inst_idx = vinst->inst;
        uint32_t tmp;
        int vcn_inst;
 
@@ -1348,13 +1359,14 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
 /**
  * vcn_v4_0_3_stop - VCN stop
  *
- * @adev: amdgpu_device pointer
- * @i: instance to stop
+ * @vinst: VCN instance
  *
  * Stop VCN block
  */
-static int vcn_v4_0_3_stop(struct amdgpu_device *adev, int i)
+static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
 {
+       struct amdgpu_device *adev = vinst->adev;
+       int i = vinst->inst;
        volatile struct amdgpu_vcn4_fw_shared *fw_shared;
        int r = 0, vcn_inst;
        uint32_t tmp;
@@ -1365,7 +1377,7 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev, int i)
        fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
 
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
-               vcn_v4_0_3_stop_dpg_mode(adev, i);
+               vcn_v4_0_3_stop_dpg_mode(vinst);
                goto Done;
        }
 
@@ -1422,7 +1434,7 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev, int i)
        WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
 
        /* apply HW clock gating */
-       vcn_v4_0_3_enable_clock_gating(adev, i);
+       vcn_v4_0_3_enable_clock_gating(vinst);
 
 Done:
        return 0;
@@ -1656,13 +1668,15 @@ static int vcn_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
        int i;
 
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+               struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
                if (enable) {
                        if (RREG32_SOC15(VCN, GET_INST(VCN, i),
                                         regUVD_STATUS) != UVD_STATUS__IDLE)
                                return -EBUSY;
-                       vcn_v4_0_3_enable_clock_gating(adev, i);
+                       vcn_v4_0_3_enable_clock_gating(vinst);
                } else {
-                       vcn_v4_0_3_disable_clock_gating(adev, i);
+                       vcn_v4_0_3_disable_clock_gating(vinst);
                }
        }
        return 0;
@@ -1673,6 +1687,7 @@ static int vcn_v4_0_3_set_powergating_state_inst(struct amdgpu_ip_block *ip_bloc
                                                 int i)
 {
        struct amdgpu_device *adev = ip_block->adev;
+       struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
        int ret = 0;
 
        /* for SRIOV, guest should not control VCN Power-gating
@@ -1688,9 +1703,9 @@ static int vcn_v4_0_3_set_powergating_state_inst(struct amdgpu_ip_block *ip_bloc
                return 0;
 
        if (state == AMD_PG_STATE_GATE)
-               ret = vcn_v4_0_3_stop(adev, i);
+               ret = vcn_v4_0_3_stop(vinst);
        else
-               ret = vcn_v4_0_3_start(adev, i);
+               ret = vcn_v4_0_3_start(vinst);
 
        if (!ret)
                adev->vcn.inst[i].cur_state = state;