drm/amdgpu: Change SOC15_REG_OFFSET to use dynamic register offset
author	Shaoyun Liu <Shaoyun.Liu@amd.com>
Wed, 29 Nov 2017 18:51:32 +0000 (13:51 -0500)
committer	Alex Deucher <alexander.deucher@amd.com>
Fri, 8 Dec 2017 16:32:24 +0000 (11:32 -0500)
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc15_common.h
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
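The core change is the SOC15_REG_OFFSET() rewrite in soc15_common.h below: instead of picking a fixed ip##_BASE__INST##inst##_SEGn constant at compile time, the macro now indexes the per-device adev->reg_offset[][][] table, so every call site needs an "adev" in scope. That is why the ring functions touched by this patch gain a local "struct amdgpu_device *adev = ring->adev;". A minimal standalone sketch of the lookup pattern (hypothetical simplified types and made-up values, not the real amdgpu structures):

	/* Sketch only: mimics the shape of the new macro, nothing more. */
	#include <stdio.h>

	enum { HDP_HWIP, HWIP_MAX };

	struct dev_sketch {
		/* [ip block][instance][base segment index], filled at device init */
		unsigned int reg_offset[HWIP_MAX][1][4];
	};

	/* Same form as the new SOC15_REG_OFFSET: runtime table lookup + reg */
	#define REG_OFFSET_SKETCH(adev, ip, inst, base_idx, reg) \
		((adev)->reg_offset[ip][inst][base_idx] + (reg))

	int main(void)
	{
		struct dev_sketch dev = {
			.reg_offset = { [HDP_HWIP] = { { 0x0f20 } } },	/* made up */
		};
		unsigned int mmHDP_READ_CACHE_INVALIDATE = 0x4d;	/* made up */

		printf("0x%x\n", REG_OFFSET_SKETCH(&dev, HDP_HWIP, 0, 0,
						   mmHDP_READ_CACHE_INVALIDATE));
		return 0;
	}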

index b6122364565e0a8b8870a7978524640e70f6cf35..ec53c23d0f6b4fcc364af9879e8467f685d6453f 100644 (file)
@@ -3585,6 +3585,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 
 static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        gfx_v9_0_write_data_to_reg(ring, 0, true,
                                   SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
 }
@@ -3746,6 +3748,8 @@ static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
                                         u64 seq, unsigned int flags)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        /* we only allocate 32bit for each seq wb address */
        BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
index 78fe3f2917a08f47bb92651fc2705fef30bc128c..acaf7891eeabcd4899095d10e5f27e0e90a8ee05 100644 (file)
@@ -298,9 +298,10 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
 }
 
 static int
-psp_v10_0_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                 unsigned int *sram_data_reg_offset,
-                 enum AMDGPU_UCODE_ID ucode_id)
+psp_v10_0_sram_map(struct amdgpu_device *adev,
+               unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+               unsigned int *sram_data_reg_offset,
+               enum AMDGPU_UCODE_ID ucode_id)
 {
        int ret = 0;
 
@@ -395,7 +396,7 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp,
        uint32_t *ucode_mem = NULL;
        struct amdgpu_device *adev = psp->adev;
 
-       err = psp_v10_0_sram_map(&fw_sram_reg_val, &fw_sram_addr_reg_offset,
+       err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
                                &fw_sram_data_reg_offset, ucode_type);
        if (err)
                return false;
index e75a23d858ef1a2ffc068955ad6954563025b16e..0b22e58733db332e4abc3a1d0e48b3c09ce73763 100644 (file)
@@ -410,9 +410,10 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
 }
 
 static int
-psp_v3_1_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                 unsigned int *sram_data_reg_offset,
-                 enum AMDGPU_UCODE_ID ucode_id)
+psp_v3_1_sram_map(struct amdgpu_device *adev,
+               unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+               unsigned int *sram_data_reg_offset,
+               enum AMDGPU_UCODE_ID ucode_id)
 {
        int ret = 0;
 
@@ -507,7 +508,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
        uint32_t *ucode_mem = NULL;
        struct amdgpu_device *adev = psp->adev;
 
-       err = psp_v3_1_sram_map(&fw_sram_reg_val, &fw_sram_addr_reg_offset,
+       err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
                                &fw_sram_data_reg_offset, ucode_type);
        if (err)
                return false;
index 5c98c4dacf073ae425ed6fa0ade596abd4edae27..d3b1e5069e656e392decd2231598e21934530167 100644 (file)
@@ -385,6 +385,8 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 
 static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE));
index fb5f825e8924471de82f9bcce3636beadbc540c4..47fafe8540cbc56dc0f763c475878bd85bcdf95f 100644 (file)
@@ -373,12 +373,9 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
        if (indexed) {
                return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
        } else {
-               switch (reg_offset) {
-               case SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG):
+               if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
                        return adev->gfx.config.gb_addr_config;
-               default:
-                       return RREG32(reg_offset);
-               }
+               return RREG32(reg_offset);
        }
 }
 
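The switch-to-if conversion above is a direct consequence of the macro change: a case label must be an integer constant expression, and SOC15_REG_OFFSET() no longer expands to one once it reads adev->reg_offset[] at run time, so the GB_ADDR_CONFIG special case has to become an if. A hypothetical standalone illustration of that constraint:

	/* Hypothetical: a value fetched from a runtime table cannot be a case
	 * label, only an if/else comparison.
	 */
	unsigned int pick_value(const unsigned int *offsets, unsigned int reg,
				unsigned int cached)
	{
		if (reg == offsets[0] + 0x063e)	/* runtime value: if, not case */
			return cached;		/* e.g. a cached register copy */
		return reg;			/* stand-in for an MMIO read */
	}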
index e2207c5745f080e12279c3ef9bd1519d430b9868..413951c33983c2e5b89ebabe99f42b3a62f60a98 100644 (file)
@@ -41,11 +41,7 @@ struct nbio_hdp_flush_reg {
 
 
 /* Register Access Macros */
-#define SOC15_REG_OFFSET(ip, inst, reg)       (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
-                                                (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
-                                                    (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
-                                                        (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
-                                                            (ip##_BASE__INST##inst##_SEG4 + reg)))))
+#define SOC15_REG_OFFSET(ip, inst, reg)        (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
 
 #define WREG32_FIELD15(ip, idx, reg, field, val)       \
        WREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg,  \
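For reference, a call site such as the one in gfx_v9_0.c above now expands, per the new definition, to

	SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE)
	  -> adev->reg_offset[HDP_HWIP][0][mmHDP_READ_CACHE_INVALIDATE_BASE_IDX]
	     + mmHDP_READ_CACHE_INVALIDATE

which matches the adev->reg_offset lookup that WREG32_FIELD15() directly below already uses.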
index dfaf9cb8a320b390ed75113d18257e98dc24b1da..b22adeb39653c74d15a24f43503e508d1f500d44 100644 (file)
@@ -1086,6 +1086,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
 static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
        amdgpu_ring_write(ring,
@@ -1123,6 +1125,7 @@ static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
 static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
                        u64 seq, unsigned flags)
 {
+
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
        amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
@@ -1141,6 +1144,8 @@ static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
  */
 static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(NBIF, 0,
                mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0));
        amdgpu_ring_write(ring, 0);
@@ -1155,6 +1160,8 @@ static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
  */
 static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
        amdgpu_ring_write(ring, 1);
 }
@@ -1214,6 +1221,8 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
        amdgpu_ring_write(ring, vm_id);
@@ -1250,6 +1259,8 @@ static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
 static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring,
                                uint32_t data0, uint32_t data1)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);
@@ -1264,6 +1275,8 @@ static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring,
 static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
                                uint32_t data0, uint32_t data1, uint32_t mask)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);
index 6f7a7676a1522085b8e25f914fa5c29871abe801..3f1ca86967048c2bc4ad71c5e58c3f6430951a8d 100644 (file)
@@ -744,6 +744,8 @@ static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
  */
 static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, 0);
@@ -761,6 +763,8 @@ static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
  */
 static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
@@ -777,6 +781,8 @@ static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
 static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
        amdgpu_ring_write(ring,
@@ -812,6 +818,8 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
  */
 static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
        amdgpu_ring_write(ring, 1);
 }
@@ -828,6 +836,8 @@ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
        amdgpu_ring_write(ring, vm_id);
@@ -846,6 +856,8 @@ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
 static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
                                uint32_t data0, uint32_t data1)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);
@@ -860,6 +872,8 @@ static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
 static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
                                uint32_t data0, uint32_t data1, uint32_t mask)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);