/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v6_1.h"

#include "nbio/nbio_6_1_default.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "nbio/nbio_6_1_smn.h"
#include "vega10_enum.h"
#include <uapi/linux/kfd_ioctl.h>
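
/* Remap the HDP MEM/REG flush control registers into the fixed MMIO page
 * (adev->rmmio_remap) that is exposed to user mode through KFD.
 */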
static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}
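
/* Read the ATI revision ID from the device strap register. */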
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}
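
/* Enable or disable framebuffer (VRAM) accesses through the BIF. */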
static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			     BIF_FB_EN__FB_READ_EN_MASK |
			     BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
}
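
/* Program the doorbell range for one SDMA instance; both instances share
 * the BIF_SDMA0_DOORBELL_RANGE field layout. A SIZE of 0 disables the range.
 */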
static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index, int doorbell_size)
{
	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}
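
/* Toggle the doorbell aperture for this physical function. */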
static void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
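
/* Point the doorbell self-ring aperture at the device's doorbell BAR and
 * enable it; writing 0 to the aperture control disables it.
 */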
static void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}
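
/* Program the doorbell range used by the IH (interrupt handler) ring. */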
static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  BIF_IH_DOORBELL_RANGE, SIZE, 6);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}
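
/* Basic interrupt controller setup: program the dummy-page address and the
 * dummy-read/snoop behaviour in INTERRUPT_CNTL.
 */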
static void nbio_v6_1_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
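
/* Toggle BIF medium-grain clock gating (MGCG) via the CPM_CONTROL gate
 * enable bits; the register is only written when the value changes.
 */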
static void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	u32 def, data;

	def = data = RREG32_PCIE(smnCPM_CONTROL);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) {
		data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			 CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	} else {
		data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			  CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnCPM_CONTROL, data);
}
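
/* Toggle BIF memory light sleep (LS) via the PCIE_CNTL2 LS enable bits. */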
static void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	u32 def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}
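
/* Report the BIF clock-gating features that are currently enabled in
 * hardware.
 */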
static void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	u32 data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

static u32 nbio_v6_1_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}

static u32 nbio_v6_1_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}

static u32 nbio_v6_1_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
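
/* Reference/mask bits polled in GPU_HDP_FLUSH_DONE to confirm completion of
 * an HDP flush requested through GPU_HDP_FLUSH_REQ, one bit per client.
 */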
const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
};
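
/* One-time PCIE register setup at init: constrain the SWUS max read request
 * size and disable CI slave ordering.
 */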
static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
	u32 def, data;

	def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

	if (def != data)
		WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);

	def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
	data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);

	if (def != data)
		WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
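
/* NBIO 6.1 callbacks plugged into the common amdgpu NBIO interface. */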
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
	.get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
	.get_rev_id = nbio_v6_1_get_rev_id,
	.mc_access_enable = nbio_v6_1_mc_access_enable,
	.get_memsize = nbio_v6_1_get_memsize,
	.sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
	.enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v6_1_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v6_1_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v6_1_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v6_1_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v6_1_get_clockgating_state,
	.ih_control = nbio_v6_1_ih_control,
	.init_registers = nbio_v6_1_init_registers,
	.remap_hdp_registers = nbio_v6_1_remap_hdp_registers,
};