drm/amd/pm: correct the usage for 'supported' member of smu_feature structure

drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include "amdgpu.h"
#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_navi10.h"
#include "atom.h"
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "thm/thm_11_0_2_offset.h"
#include "thm/thm_11_0_2_sh_mask.h"

#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "smu_cmn.h"
#include "smu_11_0_cdr_table.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT) | \
	FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))

#define SMU_11_0_GFX_BUSY_THRESHOLD 15

static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
	MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0),
	MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0),
	MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
	MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
	MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 0),
	MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 0),
	MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 0),
	MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 0),
	MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
	MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
	MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0),
	MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
	MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
	MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
	MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
	MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
	MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
	MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable, 0),
	MSG_MAP(RunBtc, PPSMC_MSG_RunBtc, 0),
	MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
	MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
	MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
	MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0),
	MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
	MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
	MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
	MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
	MSG_MAP(SetMemoryChannelConfig, PPSMC_MSG_SetMemoryChannelConfig, 0),
	MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode, 0),
	MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh, 0),
	MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow, 0),
	MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
	MSG_MAP(SetMinDeepSleepDcefclk, PPSMC_MSG_SetMinDeepSleepDcefclk, 0),
	MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
	MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
	MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch, 0),
	MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 0),
	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
	MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
	MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
	MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
	MSG_MAP(ConfigureGfxDidt, PPSMC_MSG_ConfigureGfxDidt, 0),
	MSG_MAP(NumOfDisplays, PPSMC_MSG_NumOfDisplays, 0),
	MSG_MAP(SetSystemVirtualDramAddrHigh, PPSMC_MSG_SetSystemVirtualDramAddrHigh, 0),
	MSG_MAP(SetSystemVirtualDramAddrLow, PPSMC_MSG_SetSystemVirtualDramAddrLow, 0),
	MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
	MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
	MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
	MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
	MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
	MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
	MSG_MAP(PrepareMp1ForReset, PPSMC_MSG_PrepareMp1ForReset, 0),
	MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown, 0),
	MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
	MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
	MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
	MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
	MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 0),
	MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALDisableDummyPstateChange, 0),
	MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange, 0),
	MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm, 0),
	MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0),
	MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
	MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH, PPSMC_MSG_SetDriverDummyTableDramAddrHigh, 0),
	MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW, PPSMC_MSG_SetDriverDummyTableDramAddrLow, 0),
	MSG_MAP(GET_UMC_FW_WA, PPSMC_MSG_GetUMCFWWA, 0),
};

static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
	CLK_MAP(SCLK, PPCLK_GFXCLK),
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_SOCCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(DCLK, PPCLK_DCLK),
	CLK_MAP(VCLK, PPCLK_VCLK),
	CLK_MAP(DCEFCLK, PPCLK_DCEFCLK),
	CLK_MAP(DISPCLK, PPCLK_DISPCLK),
	CLK_MAP(PIXCLK, PPCLK_PIXCLK),
	CLK_MAP(PHYCLK, PPCLK_PHYCLK),
};

static struct cmn2asic_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(DPM_PREFETCHER),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_PACE),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_MP0CLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCEFCLK),
	FEA_MAP(MEM_VDDCI_SCALING),
	FEA_MAP(MEM_MVDD_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCEFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(VCN_PG),
	FEA_MAP(JPEG_PG),
	FEA_MAP(USB_PG),
	FEA_MAP(RSMU_SMN_CG),
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(GFX_EDC),
	FEA_MAP(APCC_PLUS),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(VR1HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(THERMAL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(RM),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFX_SS),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(TEMP_DEPENDENT_VMIN),
	FEA_MAP(MMHUB_PG),
	FEA_MAP(ATHUB_PG),
	FEA_MAP(APCC_DFLL),
};

static struct cmn2asic_mapping navi10_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(AVFS_FUSE_OVERRIDE),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	TAB_MAP(OVERDRIVE),
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(PACE),
};

static struct cmn2asic_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};

static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};

static const uint8_t navi1x_throttler_map[] = {
	[THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
	[THROTTLER_PPM_BIT] = (SMU_THROTTLER_PPM_BIT),
	[THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT),
};

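/*
 * Determine whether this is a secure part by reading the MP0 firmware
 * interface register over PCIe: bit 19 of smnMP0_FW_INTF clear means the
 * ASIC is not secure.
 */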
static bool is_asic_secure(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	bool is_secure = true;
	uint32_t mp0_fw_intf;

	mp0_fw_intf = RREG32_PCIE(MP0_Public |
				  (smnMP0_FW_INTF & 0xffffffff));

	if (!(mp0_fw_intf & (1 << 19)))
		is_secure = false;

	return is_secure;
}

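/*
 * Build the 64-bit allowed-feature mask (returned as two 32-bit words):
 * a baseline set of features is always requested, while DPM, deep sleep,
 * ULV, GFXOFF and power-gating bits are added only when the matching
 * pp_feature or pg_flags knobs allow them.
 */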
static int
navi10_get_allowed_feature_mask(struct smu_context *smu,
				uint32_t *feature_mask, uint32_t num)
{
	struct amdgpu_device *adev = smu->adev;

	if (num > 2)
		return -EINVAL;

	memset(feature_mask, 0, sizeof(uint32_t) * num);

	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
				| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
				| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
				| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
				| FEATURE_MASK(FEATURE_PPT_BIT)
				| FEATURE_MASK(FEATURE_TDC_BIT)
				| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
				| FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
				| FEATURE_MASK(FEATURE_VR0HOT_BIT)
				| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
				| FEATURE_MASK(FEATURE_THERMAL_BIT)
				| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
				| FEATURE_MASK(FEATURE_DS_LCLK_BIT)
				| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
				| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
				| FEATURE_MASK(FEATURE_BACO_BIT)
				| FEATURE_MASK(FEATURE_GFX_SS_BIT)
				| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
				| FEATURE_MASK(FEATURE_FW_CTF_BIT)
				| FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);

	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);

	if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);

	if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);

	if (adev->pm.pp_feature & PP_ULV_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);

	if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);

	if (adev->pm.pp_feature & PP_GFXOFF_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);

	if (smu->dc_controlled_by_gpio)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);

	if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);

	/* DPM UCLK enablement should be skipped for navi10 A0 secure board */
	if (!(is_asic_secure(smu) &&
	      (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
	      (adev->rev_id == 0)) &&
	    (adev->pm.pp_feature & PP_MCLK_DPM_MASK))
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
				| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
				| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);

	/* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
	if (is_asic_secure(smu) &&
	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
	    (adev->rev_id == 0))
		*(uint64_t *)feature_mask &=
			~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);

	return 0;
}

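/*
 * BACO/MACO is reported as supported only when the powerplay table
 * advertises the platform cap and the PX-capable strap is set in
 * RCC_BIF_STRAP0.
 */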
static void navi10_check_bxco_support(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_powerplay_table *powerplay_table =
		table_context->power_play_table;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	uint32_t val;

	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
	    powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) {
		val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
		smu_baco->platform_support =
			(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
									false;
	}
}

static int navi10_check_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_powerplay_table *powerplay_table =
		table_context->power_play_table;

	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
		smu->dc_controlled_by_gpio = true;

	navi10_check_bxco_support(smu);

	table_context->thermal_controller_type =
		powerplay_table->thermal_controller_type;

	/*
	 * Instead of having its own buffer space and getting the
	 * overdrive_table copied, smu->od_settings just points to the
	 * actual overdrive_table.
	 */
	smu->od_settings = &powerplay_table->overdrive_table;

	return 0;
}

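/*
 * Merge the board-specific smc_dpm_info data from the VBIOS into the
 * driver PPTable. Content revision 5 covers nv10/nv14, revision 7 nv12.
 */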
static int navi10_append_powerplay_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
	struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smc_dpm_info);

	ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
					     (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	dev_info(adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",
		 smc_dpm_table->table_header.format_revision,
		 smc_dpm_table->table_header.content_revision);

	if (smc_dpm_table->table_header.format_revision != 4) {
		dev_err(adev->dev, "smc_dpm_info table format revision is not 4!\n");
		return -EINVAL;
	}

	switch (smc_dpm_table->table_header.content_revision) {
	case 5: /* nv10 and nv14 */
		smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
				    smc_dpm_table, I2cControllers);
		break;
	case 7: /* nv12 */
		ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
						     (uint8_t **)&smc_dpm_table_v4_7);
		if (ret)
			return ret;
		smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
				    smc_dpm_table_v4_7, I2cControllers);
		break;
	default:
		dev_err(smu->adev->dev, "smc_dpm_info with unsupported content revision %d!\n",
			smc_dpm_table->table_header.content_revision);
		return -EINVAL;
	}

	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
		/* TODO: remove it once SMU fw fixes it */
		smc_pptable->DebugOverrides |= DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN;
	}

	return 0;
}

static int navi10_store_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_powerplay_table *powerplay_table =
		table_context->power_play_table;

	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
	       sizeof(PPTable_t));

	return 0;
}

static int navi10_setup_pptable(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_setup_pptable(smu);
	if (ret)
		return ret;

	ret = navi10_store_powerplay_table(smu);
	if (ret)
		return ret;

	ret = navi10_append_powerplay_table(smu);
	if (ret)
		return ret;

	ret = navi10_check_powerplay_table(smu);
	if (ret)
		return ret;

	return ret;
}

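/*
 * Describe the SMU-resident tables and allocate the driver-side copies of
 * the metrics, gpu_metrics and watermarks tables.
 */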
static int navi10_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_NV1X_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_NV1X_t),
					   GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	return 0;

err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}

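/*
 * Four flavors of metrics readers follow: navi10 vs navi12 layouts, each
 * with a "legacy" variant for older SMU firmware that still reports the
 * original SmuMetrics structure.
 */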
static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
					      MetricsMember_t member,
					      uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_legacy_t *metrics =
		(SmuMetrics_legacy_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCEFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->AverageGfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->AverageSocclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->TemperatureMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->TemperatureVrGfx *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->TemperatureVrSoc *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->CurrFanSpeed;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

static int navi10_get_smu_metrics_data(struct smu_context *smu,
				       MetricsMember_t member,
				       uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		(SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCEFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		if (metrics->AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPreDs;
		else
			*value = metrics->AverageGfxclkFrequencyPostDs;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->AverageSocclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequencyPostDs;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->TemperatureMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->TemperatureVrGfx *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->TemperatureVrSoc *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->CurrFanSpeed;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
					      MetricsMember_t member,
					      uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_NV12_legacy_t *metrics =
		(SmuMetrics_NV12_legacy_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCEFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->AverageGfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->AverageSocclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->TemperatureMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->TemperatureVrGfx *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->TemperatureVrSoc *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->CurrFanSpeed;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

static int navi12_get_smu_metrics_data(struct smu_context *smu,
				       MetricsMember_t member,
				       uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_NV12_t *metrics =
		(SmuMetrics_NV12_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCEFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		if (metrics->AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPreDs;
		else
			*value = metrics->AverageGfxclkFrequencyPostDs;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->AverageSocclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequencyPostDs;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->TemperatureMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->TemperatureVrGfx *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->TemperatureVrSoc *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->CurrFanSpeed;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

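/*
 * Dispatch to the right metrics reader based on the MP1 IP version and
 * the running SMU firmware version.
 */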
static int navi1x_get_smu_metrics_data(struct smu_context *smu,
				       MetricsMember_t member,
				       uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t smu_version;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if (ret) {
		dev_err(adev->dev, "Failed to get smu version!\n");
		return ret;
	}

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 9):
		if (smu_version > 0x00341C00)
			ret = navi12_get_smu_metrics_data(smu, member, value);
		else
			ret = navi12_get_legacy_smu_metrics_data(smu, member, value);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	default:
		if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
		    ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
			ret = navi10_get_smu_metrics_data(smu, member, value);
		else
			ret = navi10_get_legacy_smu_metrics_data(smu, member, value);
		break;
	}

	return ret;
}

static int navi10_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
				       GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;

	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

static int navi10_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = navi10_tables_init(smu);
	if (ret)
		return ret;

	ret = navi10_allocate_dpm_context(smu);
	if (ret)
		return ret;

	return smu_v11_0_init_smc_tables(smu);
}

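/*
 * Populate the driver DPM tables from the SMU. Clocks whose DPM feature
 * is disabled get a single level fixed at the boot frequency.
 */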
static int navi10_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
	struct smu_11_0_dpm_table *dpm_table;
	int ret = 0;

	/* socclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.soc_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_SOCCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* gfxclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.gfx_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_GFXCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* uclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.uclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_UCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* vclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.vclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_VCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* dclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.dclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_DCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* dcefclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.dcef_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_DCEFCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* pixelclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.pixel_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_PIXCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* displayclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.display_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_DISPCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* phyclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.phy_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_PHYCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	return 0;
}

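/*
 * VCN power up/down is requested through SMU messages, and only when the
 * VCN PG feature is enabled.
 */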
static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable) {
		/* vcn dpm on is a prerequisite for vcn power gate messages */
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
			if (ret)
				return ret;
		}
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable) {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
			if (ret)
				return ret;
		}
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
			if (ret)
				return ret;
		}
	}

	return ret;
}

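/*
 * Read the current frequency of a clock domain by mapping the generic
 * clock type to the matching metrics table member.
 */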
static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
						enum smu_clk_type clk_type,
						uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id = 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	switch (clk_id) {
	case PPCLK_GFXCLK:
		member_type = METRICS_CURR_GFXCLK;
		break;
	case PPCLK_UCLK:
		member_type = METRICS_CURR_UCLK;
		break;
	case PPCLK_SOCCLK:
		member_type = METRICS_CURR_SOCCLK;
		break;
	case PPCLK_VCLK:
		member_type = METRICS_CURR_VCLK;
		break;
	case PPCLK_DCLK:
		member_type = METRICS_CURR_DCLK;
		break;
	case PPCLK_DCEFCLK:
		member_type = METRICS_CURR_DCEFCLK;
		break;
	default:
		return -EINVAL;
	}

	return navi1x_get_smu_metrics_data(smu,
					   member_type,
					   value);
}

static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	DpmDescriptor_t *dpm_desc = NULL;
	uint32_t clk_index = 0;

	clk_index = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_CLK,
						   clk_type);
	dpm_desc = &pptable->DpmDescriptor[clk_index];

	/* 0 - Fine grained DPM, 1 - Discrete DPM */
	return dpm_desc->SnapToDiscrete == 0;
}

static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
{
	return od_table->cap[cap];
}

static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
					enum SMU_11_0_ODSETTING_ID setting,
					uint32_t *min, uint32_t *max)
{
	if (min)
		*min = od_table->min[setting];
	if (max)
		*max = od_table->max[setting];
}

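/*
 * sysfs backend for pp_dpm_* and the OD interfaces: prints the DPM levels
 * (min/current/max pseudo levels for fine-grained clocks), the PCIe link
 * table and the overdrive settings and ranges.
 */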
static int navi10_print_clk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type, char *buf)
{
	uint16_t *curve_settings;
	int i, levels, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	uint32_t freq_values[3] = {0};
	uint32_t mark_index = 0;
	struct smu_table_context *table_context = &smu->smu_table;
	uint32_t gen_speed, lane_width;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
	OverDriveTable_t *od_table =
		(OverDriveTable_t *)table_context->overdrive_table;
	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
	uint32_t min_value, max_value;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_SOCCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_DCEFCLK:
		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
		if (ret)
			return size;

		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
		if (ret)
			return size;

		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
			for (i = 0; i < count; i++) {
				ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
				if (ret)
					return size;

				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
						      cur_value == value ? "*" : "");
			}
		} else {
			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
			if (ret)
				return size;
			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
			if (ret)
				return size;

			freq_values[1] = cur_value;
			mark_index = cur_value == freq_values[0] ? 0 :
				     cur_value == freq_values[2] ? 2 : 1;

			levels = 3;
			if (mark_index != 1) {
				levels = 2;
				freq_values[1] = freq_values[2];
			}

			for (i = 0; i < levels; i++) {
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i],
						      i == mark_index ? "*" : "");
			}
		}
		break;
	case SMU_PCIE:
		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
		for (i = 0; i < NUM_LINK_LEVELS; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
					pptable->LclkFreq[i],
					(gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
					(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
					"*" : "");
		break;
	case SMU_OD_SCLK:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
			break;
		size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
		size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
				      od_table->GfxclkFmin, od_table->GfxclkFmax);
		break;
	case SMU_OD_MCLK:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
			break;
		size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
		size += sysfs_emit_at(buf, size, "1: %uMHz\n", od_table->UclkFmax);
		break;
	case SMU_OD_VDDC_CURVE:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
			break;
		size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
		for (i = 0; i < 3; i++) {
			switch (i) {
			case 0:
				curve_settings = &od_table->GfxclkFreq1;
				break;
			case 1:
				curve_settings = &od_table->GfxclkFreq2;
				break;
			case 2:
				curve_settings = &od_table->GfxclkFreq3;
				break;
			default:
				break;
			}
			size += sysfs_emit_at(buf, size, "%d: %uMHz %umV\n",
					      i, curve_settings[0],
					      curve_settings[1] / NAVI10_VOLTAGE_SCALE);
		}
		break;
	case SMU_OD_RANGE:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
						    &min_value, NULL);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
						    NULL, &max_value);
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
						    &min_value, &max_value);
			size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
						    &min_value, &max_value);
			size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
					      min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
						    &min_value, &max_value);
			size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
					      min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
						    &min_value, &max_value);
			size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
					      min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
						    &min_value, &max_value);
			size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
					      min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
						    &min_value, &max_value);
			size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
					      min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
						    &min_value, &max_value);
			size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
					      min_value, max_value);
		}

		break;
	default:
		break;
	}

	return size;
}

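/*
 * Apply a user-selected DPM level mask by programming the soft min/max
 * frequency range; fine-grained clocks are clamped to two levels.
 */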
static int navi10_force_clk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type, uint32_t mask)
{
	int ret = 0, size = 0;
	uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;

	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_SOCCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
		/* There are only 2 levels for fine grained DPM */
		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
		}

		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
		if (ret)
			return size;

		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
		if (ret)
			return size;

		ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
		if (ret)
			return size;
		break;
	case SMU_DCEFCLK:
		dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
		break;

	default:
		break;
	}

	return size;
}

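/*
 * Fill the UMD pstate table. The peak gfxclk depends on the SKU (PCI
 * revision id), while the standard levels fall back to the minimum when
 * the profiling clocks are out of reach.
 */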
static int navi10_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_11_0_dpm_context *dpm_context =
				smu->smu_dpm.dpm_context;
	struct smu_11_0_dpm_table *gfx_table =
				&dpm_context->dpm_tables.gfx_table;
	struct smu_11_0_dpm_table *mem_table =
				&dpm_context->dpm_tables.uclk_table;
	struct smu_11_0_dpm_table *soc_table =
				&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_freq;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		switch (adev->pdev->revision) {
		case 0xf0: /* XTX */
		case 0xc0:
			sclk_freq = NAVI10_PEAK_SCLK_XTX;
			break;
		case 0xf1: /* XT */
		case 0xc1:
			sclk_freq = NAVI10_PEAK_SCLK_XT;
			break;
		default: /* XL */
			sclk_freq = NAVI10_PEAK_SCLK_XL;
			break;
		}
		break;
	case IP_VERSION(11, 0, 5):
		switch (adev->pdev->revision) {
		case 0xc7: /* XT */
		case 0xf4:
			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
			break;
		case 0xc1: /* XTM */
		case 0xf2:
			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
			break;
		case 0xc3: /* XLM */
		case 0xf3:
			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
			break;
		case 0xc5: /* XTX */
		case 0xf6:
			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
			break;
		default: /* XL */
			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
			break;
		}
		break;
	case IP_VERSION(11, 0, 9):
		sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
		break;
	default:
		sclk_freq = gfx_table->dpm_levels[gfx_table->count - 1].value;
		break;
	}
	pstate_table->gfxclk_pstate.peak = sclk_freq;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;

	if (gfx_table->max > NAVI10_UMD_PSTATE_PROFILING_GFXCLK &&
	    mem_table->max > NAVI10_UMD_PSTATE_PROFILING_MEMCLK &&
	    soc_table->max > NAVI10_UMD_PSTATE_PROFILING_SOCCLK) {
		pstate_table->gfxclk_pstate.standard =
			NAVI10_UMD_PSTATE_PROFILING_GFXCLK;
		pstate_table->uclk_pstate.standard =
			NAVI10_UMD_PSTATE_PROFILING_MEMCLK;
		pstate_table->socclk_pstate.standard =
			NAVI10_UMD_PSTATE_PROFILING_SOCCLK;
	} else {
		pstate_table->gfxclk_pstate.standard =
			pstate_table->gfxclk_pstate.min;
		pstate_table->uclk_pstate.standard =
			pstate_table->uclk_pstate.min;
		pstate_table->socclk_pstate.standard =
			pstate_table->socclk_pstate.min;
	}

	return 0;
}

static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
						 enum smu_clk_type clk_type,
						 struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0, i = 0;
	uint32_t level_count = 0, freq = 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_DCEFCLK:
	case SMU_SOCCLK:
	case SMU_MCLK:
	case SMU_UCLK:
		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &level_count);
		if (ret)
			return ret;

		level_count = min(level_count, (uint32_t)MAX_NUM_CLOCKS);
		clocks->num_levels = level_count;

		for (i = 0; i < level_count; i++) {
			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &freq);
			if (ret)
				return ret;

			clocks->data[i].clocks_in_khz = freq * 1000;
			clocks->data[i].latency_in_us = 0;
		}
		break;
	default:
		break;
	}

	return ret;
}

static int navi10_pre_display_config_changed(struct smu_context *smu)
{
	int ret = 0;
	uint32_t max_freq = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
	if (ret)
		return ret;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
		if (ret)
			return ret;
		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq);
		if (ret)
			return ret;
	}

	return ret;
}

static int navi10_display_config_changed(struct smu_context *smu)
{
	int ret = 0;

	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
						      smu->display_config->num_display,
						      NULL);
		if (ret)
			return ret;
	}

	return ret;
}

4228b601
KW
1639static bool navi10_is_dpm_running(struct smu_context *smu)
1640{
1641 int ret = 0;
3d14a79b
KW
1642 uint64_t feature_enabled;
1643
2d282665 1644 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
3d14a79b
KW
1645 if (ret)
1646 return false;
1647
4228b601
KW
1648 return !!(feature_enabled & SMC_DPM_FEATURE);
1649}
1650
d9ca7567
EQ
1651static int navi10_get_fan_speed_rpm(struct smu_context *smu,
1652 uint32_t *speed)
1653{
1654 int ret = 0;
1655
1656 if (!speed)
1657 return -EINVAL;
1658
1659 switch (smu_v11_0_get_fan_control_mode(smu)) {
1660 case AMD_FAN_CTRL_AUTO:
1661 ret = navi10_get_smu_metrics_data(smu,
1662 METRICS_CURR_FANSPEED,
1663 speed);
1664 break;
1665 default:
1666 ret = smu_v11_0_get_fan_speed_rpm(smu,
1667 speed);
1668 break;
1669 }
1670
1671 return ret;
1672}
1673
3204ff3e
AD
1674static int navi10_get_fan_parameters(struct smu_context *smu)
1675{
1676 PPTable_t *pptable = smu->smu_table.driver_pptable;
1677
1678 smu->fan_max_rpm = pptable->FanMaximumRpm;
1679
1680 return 0;
1681}
1682
b45dc20b
KW
1683static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
1684{
1685 DpmActivityMonitorCoeffInt_t activity_monitor;
1686 uint32_t i, size = 0;
c0640304 1687 int16_t workload_type = 0;
b45dc20b
KW
1688 static const char *title[] = {
1689 "PROFILE_INDEX(NAME)",
1690 "CLOCK_TYPE(NAME)",
1691 "FPS",
1692 "MinFreqType",
1693 "MinActiveFreqType",
1694 "MinActiveFreq",
1695 "BoosterFreqType",
1696 "BoosterFreq",
1697 "PD_Data_limit_c",
1698 "PD_Data_error_coeff",
1699 "PD_Data_error_rate_coeff"};
1700 int result = 0;
1701
1702 if (!buf)
1703 return -EINVAL;
1704
828db598 1705 size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
b45dc20b
KW
1706 title[0], title[1], title[2], title[3], title[4], title[5],
1707 title[6], title[7], title[8], title[9], title[10]);
1708
1709 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
1710 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
6c339f37
EQ
1711 workload_type = smu_cmn_to_asic_specific_index(smu,
1712 CMN2ASIC_MAPPING_WORKLOAD,
1713 i);
c0640304
EQ
1714 if (workload_type < 0)
1715 return -EINVAL;
1716
caad2613 1717 result = smu_cmn_update_table(smu,
0d9d78b5 1718 SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
b45dc20b
KW
1719 (void *)(&activity_monitor), false);
1720 if (result) {
d9811cfc 1721 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
b45dc20b
KW
1722 return result;
1723 }
1724
828db598 1725 size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
3867e370 1726 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
b45dc20b 1727
828db598 1728 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
b45dc20b
KW
1729 " ",
1730 0,
1731 "GFXCLK",
1732 activity_monitor.Gfx_FPS,
1733 activity_monitor.Gfx_MinFreqStep,
1734 activity_monitor.Gfx_MinActiveFreqType,
1735 activity_monitor.Gfx_MinActiveFreq,
1736 activity_monitor.Gfx_BoosterFreqType,
1737 activity_monitor.Gfx_BoosterFreq,
1738 activity_monitor.Gfx_PD_Data_limit_c,
1739 activity_monitor.Gfx_PD_Data_error_coeff,
1740 activity_monitor.Gfx_PD_Data_error_rate_coeff);
1741
828db598 1742 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
b45dc20b
KW
1743 " ",
1744 1,
1745 "SOCCLK",
1746 activity_monitor.Soc_FPS,
1747 activity_monitor.Soc_MinFreqStep,
1748 activity_monitor.Soc_MinActiveFreqType,
1749 activity_monitor.Soc_MinActiveFreq,
1750 activity_monitor.Soc_BoosterFreqType,
1751 activity_monitor.Soc_BoosterFreq,
1752 activity_monitor.Soc_PD_Data_limit_c,
1753 activity_monitor.Soc_PD_Data_error_coeff,
1754 activity_monitor.Soc_PD_Data_error_rate_coeff);
1755
828db598 1756 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
b45dc20b
KW
1757 " ",
1758 2,
1759 "MEMLK",
1760 activity_monitor.Mem_FPS,
1761 activity_monitor.Mem_MinFreqStep,
1762 activity_monitor.Mem_MinActiveFreqType,
1763 activity_monitor.Mem_MinActiveFreq,
1764 activity_monitor.Mem_BoosterFreqType,
1765 activity_monitor.Mem_BoosterFreq,
1766 activity_monitor.Mem_PD_Data_limit_c,
1767 activity_monitor.Mem_PD_Data_error_coeff,
1768 activity_monitor.Mem_PD_Data_error_rate_coeff);
1769 }
1770
1771 return size;
1772}
1773
1774static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1775{
1776 DpmActivityMonitorCoeffInt_t activity_monitor;
1777 int workload_type, ret = 0;
1778
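/*
 * input[size] carries the requested profile mode. For the CUSTOM profile,
 * input[0] selects the clock domain (0: gfxclk, 1: socclk, 2: memclk) and
 * input[1..9] supply the activity monitor coefficients programmed below.
 */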
1779 smu->power_profile_mode = input[size];
1780
1781 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
d9811cfc 1782 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
b45dc20b
KW
1783 return -EINVAL;
1784 }
1785
1786 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
b45dc20b 1787
caad2613 1788 ret = smu_cmn_update_table(smu,
0d9d78b5 1789 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
b45dc20b
KW
1790 (void *)(&activity_monitor), false);
1791 if (ret) {
d9811cfc 1792 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
b45dc20b
KW
1793 return ret;
1794 }
1795
1796 switch (input[0]) {
1797 case 0: /* Gfxclk */
1798 activity_monitor.Gfx_FPS = input[1];
1799 activity_monitor.Gfx_MinFreqStep = input[2];
1800 activity_monitor.Gfx_MinActiveFreqType = input[3];
1801 activity_monitor.Gfx_MinActiveFreq = input[4];
1802 activity_monitor.Gfx_BoosterFreqType = input[5];
1803 activity_monitor.Gfx_BoosterFreq = input[6];
1804 activity_monitor.Gfx_PD_Data_limit_c = input[7];
1805 activity_monitor.Gfx_PD_Data_error_coeff = input[8];
1806 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
1807 break;
1808 case 1: /* Socclk */
1809 activity_monitor.Soc_FPS = input[1];
1810 activity_monitor.Soc_MinFreqStep = input[2];
1811 activity_monitor.Soc_MinActiveFreqType = input[3];
1812 activity_monitor.Soc_MinActiveFreq = input[4];
1813 activity_monitor.Soc_BoosterFreqType = input[5];
1814 activity_monitor.Soc_BoosterFreq = input[6];
1815 activity_monitor.Soc_PD_Data_limit_c = input[7];
1816 activity_monitor.Soc_PD_Data_error_coeff = input[8];
1817 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
1818 break;
1819 case 2: /* Memlk */
1820 activity_monitor.Mem_FPS = input[1];
1821 activity_monitor.Mem_MinFreqStep = input[2];
1822 activity_monitor.Mem_MinActiveFreqType = input[3];
1823 activity_monitor.Mem_MinActiveFreq = input[4];
1824 activity_monitor.Mem_BoosterFreqType = input[5];
1825 activity_monitor.Mem_BoosterFreq = input[6];
1826 activity_monitor.Mem_PD_Data_limit_c = input[7];
1827 activity_monitor.Mem_PD_Data_error_coeff = input[8];
1828 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
1829 break;
1830 }
1831
caad2613 1832 ret = smu_cmn_update_table(smu,
0d9d78b5 1833 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
b45dc20b
KW
1834 (void *)(&activity_monitor), true);
1835 if (ret) {
d9811cfc 1836 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
b45dc20b
KW
1837 return ret;
1838 }
1839 }
1840
1841 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
6c339f37
EQ
1842 workload_type = smu_cmn_to_asic_specific_index(smu,
1843 CMN2ASIC_MAPPING_WORKLOAD,
1844 smu->power_profile_mode);
c0640304
EQ
1845 if (workload_type < 0)
1846 return -EINVAL;
66c86828 1847 smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1c58267c 1848 1 << workload_type, NULL);
b45dc20b
KW
1849
1850 return ret;
1851}
1852
19796597 1853static int navi10_notify_smc_display_config(struct smu_context *smu)
4f963b01
KW
1854{
1855 struct smu_clocks min_clocks = {0};
1856 struct pp_display_clock_request clock_req;
1857 int ret = 0;
1858
1859 min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
1860 min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
1861 min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
1862
7ade3ca9 1863 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
4f963b01
KW
1864 clock_req.clock_type = amd_pp_dcef_clock;
1865 clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
3697b339 1866
6c45e480 1867 ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
3697b339 1868 if (!ret) {
7ade3ca9 1869 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
66c86828 1870 ret = smu_cmn_send_smc_msg_with_param(smu,
4f963b01 1871 SMU_MSG_SetMinDeepSleepDcefclk,
1c58267c
MC
1872 min_clocks.dcef_clock_in_sr/100,
1873 NULL);
4f963b01 1874 if (ret) {
d9811cfc 1875 dev_err(smu->adev->dev, "Attempt to set divider for DCEFCLK Failed!");
4f963b01
KW
1876 return ret;
1877 }
1878 }
1879 } else {
d9811cfc 1880 dev_info(smu->adev->dev, "Attempt to set Hard Min for DCEFCLK Failed!");
4f963b01
KW
1881 }
1882 }
1883
b4bb3aaf 1884 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
661b94f5 1885 ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
4f963b01 1886 if (ret) {
d9811cfc 1887 dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
4f963b01
KW
1888 return ret;
1889 }
1890 }
1891
1892 return 0;
1893}
1894
5bbb0994 1895static int navi10_set_watermarks_table(struct smu_context *smu,
7b9c7e30 1896 struct pp_smu_wm_range_sets *clock_ranges)
5bbb0994 1897{
e7a95eea 1898 Watermarks_t *table = smu->smu_table.watermarks_table;
2622e2ae 1899 int ret = 0;
e7a95eea 1900 int i;
5bbb0994 1901
e7a95eea 1902 if (clock_ranges) {
7b9c7e30
EQ
1903 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
1904 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
e7a95eea 1905 return -EINVAL;
5bbb0994 1906
7b9c7e30
EQ
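/* Reader watermark sets fill the WM_DCEFCLK rows; writer sets fill the WM_SOCCLK rows. */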
1907 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
1908 table->WatermarkRow[WM_DCEFCLK][i].MinClock =
1909 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
1910 table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
1911 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
1912 table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
1913 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
1914 table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
1915 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
1916
1917 table->WatermarkRow[WM_DCEFCLK][i].WmSetting =
1918 clock_ranges->reader_wm_sets[i].wm_inst;
e7a95eea 1919 }
5bbb0994 1920
7b9c7e30
EQ
1921 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
1922 table->WatermarkRow[WM_SOCCLK][i].MinClock =
1923 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
1924 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
1925 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
1926 table->WatermarkRow[WM_SOCCLK][i].MinUclk =
1927 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
1928 table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
1929 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
1930
1931 table->WatermarkRow[WM_SOCCLK][i].WmSetting =
1932 clock_ranges->writer_wm_sets[i].wm_inst;
e7a95eea 1933 }
5bbb0994 1934
e7a95eea 1935 smu->watermarks_bitmap |= WATERMARKS_EXIST;
5bbb0994
KW
1936 }
1937
2622e2ae 1938 /* pass data to smu controller */
e7a95eea
EQ
1939 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1940 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
caad2613 1941 ret = smu_cmn_write_watermarks_table(smu);
2622e2ae 1942 if (ret) {
d9811cfc 1943 dev_err(smu->adev->dev, "Failed to update WMTABLE!");
2622e2ae
HW
1944 return ret;
1945 }
1946 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1947 }
1948
5bbb0994
KW
1949 return 0;
1950}
1951
9c62f993
KW
1952static int navi10_read_sensor(struct smu_context *smu,
1953 enum amd_pp_sensors sensor,
1954 void *data, uint32_t *size)
1955{
1956 int ret = 0;
1957 struct smu_table_context *table_context = &smu->smu_table;
1958 PPTable_t *pptable = table_context->driver_pptable;
1959
9b4e63f4
KF
 1960 if (!data || !size)
1961 return -EINVAL;
1962
9c62f993
KW
1963 switch (sensor) {
1964 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
1965 *(uint32_t *)data = pptable->FanMaximumRpm;
1966 *size = 4;
1967 break;
7f963d9f 1968 case AMDGPU_PP_SENSOR_MEM_LOAD:
7d6c13ef 1969 ret = navi1x_get_smu_metrics_data(smu,
fae3a572
AD
1970 METRICS_AVERAGE_MEMACTIVITY,
1971 (uint32_t *)data);
1972 *size = 4;
1973 break;
d573bb21 1974 case AMDGPU_PP_SENSOR_GPU_LOAD:
7d6c13ef 1975 ret = navi1x_get_smu_metrics_data(smu,
fae3a572
AD
1976 METRICS_AVERAGE_GFXACTIVITY,
1977 (uint32_t *)data);
d573bb21
KW
1978 *size = 4;
1979 break;
564c4c7f 1980 case AMDGPU_PP_SENSOR_GPU_POWER:
7d6c13ef 1981 ret = navi1x_get_smu_metrics_data(smu,
fae3a572
AD
1982 METRICS_AVERAGE_SOCKETPOWER,
1983 (uint32_t *)data);
564c4c7f
KW
1984 *size = 4;
1985 break;
e5aa29ce 1986 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
7d6c13ef 1987 ret = navi1x_get_smu_metrics_data(smu,
fae3a572
AD
1988 METRICS_TEMPERATURE_HOTSPOT,
1989 (uint32_t *)data);
1990 *size = 4;
1991 break;
e5aa29ce 1992 case AMDGPU_PP_SENSOR_EDGE_TEMP:
7d6c13ef 1993 ret = navi1x_get_smu_metrics_data(smu,
fae3a572
AD
1994 METRICS_TEMPERATURE_EDGE,
1995 (uint32_t *)data);
1996 *size = 4;
1997 break;
e5aa29ce 1998 case AMDGPU_PP_SENSOR_MEM_TEMP:
7d6c13ef 1999 ret = navi1x_get_smu_metrics_data(smu,
fae3a572
AD
2000 METRICS_TEMPERATURE_MEM,
2001 (uint32_t *)data);
e5aa29ce
KW
2002 *size = 4;
2003 break;
e0f9e936
EQ
2004 case AMDGPU_PP_SENSOR_GFX_MCLK:
2005 ret = navi10_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
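/* The clock value is reported in MHz; the *100 below converts it to the 10KHz units the sensor interface is assumed to expect. */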
2006 *(uint32_t *)data *= 100;
2007 *size = 4;
2008 break;
2009 case AMDGPU_PP_SENSOR_GFX_SCLK:
7d6c13ef 2010 ret = navi1x_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXCLK, (uint32_t *)data);
e0f9e936
EQ
2011 *(uint32_t *)data *= 100;
2012 *size = 4;
2013 break;
b2febc99
EQ
2014 case AMDGPU_PP_SENSOR_VDDGFX:
2015 ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
2016 *size = 4;
2017 break;
9c62f993 2018 default:
b2febc99
EQ
2019 ret = -EOPNOTSUPP;
2020 break;
9c62f993
KW
2021 }
2022
2023 return ret;
2024}
2025
f4b3295f 2026static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states)
2027{
2028 uint32_t num_discrete_levels = 0;
2029 uint16_t *dpm_levels = NULL;
2030 uint16_t i = 0;
2031 struct smu_table_context *table_context = &smu->smu_table;
2032 PPTable_t *driver_ppt = NULL;
2033
2034 if (!clocks_in_khz || !num_states || !table_context->driver_pptable)
2035 return -EINVAL;
2036
2037 driver_ppt = table_context->driver_pptable;
2038 num_discrete_levels = driver_ppt->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels;
2039 dpm_levels = driver_ppt->FreqTableUclk;
2040
2041 if (num_discrete_levels == 0 || dpm_levels == NULL)
2042 return -EINVAL;
2043
2044 *num_states = num_discrete_levels;
2045 for (i = 0; i < num_discrete_levels; i++) {
2046 /* convert to khz */
2047 *clocks_in_khz = (*dpm_levels) * 1000;
2048 clocks_in_khz++;
2049 dpm_levels++;
2050 }
2051
2052 return 0;
2053}
2054
7a816371
KW
2055static int navi10_get_thermal_temperature_range(struct smu_context *smu,
2056 struct smu_temperature_range *range)
2057{
e02e4d51
EQ
2058 struct smu_table_context *table_context = &smu->smu_table;
2059 struct smu_11_0_powerplay_table *powerplay_table =
2060 table_context->power_play_table;
cbf3f132 2061 PPTable_t *pptable = smu->smu_table.driver_pptable;
7a816371 2062
cbf3f132 2063 if (!range)
7a816371
KW
2064 return -EINVAL;
2065
0540eced
EQ
2066 memcpy(range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
2067
cbf3f132
EQ
2068 range->max = pptable->TedgeLimit *
2069 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2070 range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
2071 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2072 range->hotspot_crit_max = pptable->ThotspotLimit *
2073 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2074 range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
2075 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2076 range->mem_crit_max = pptable->TmemLimit *
2077 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 2078 range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM) *
a056ddce 2079 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
e02e4d51 2080 range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
7a816371
KW
2081
2082 return 0;
2083}
2084
6e92e156
KF
2085static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
2086 bool disable_memory_clock_switch)
2087{
2088 int ret = 0;
2089 struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
2090 (struct smu_11_0_max_sustainable_clocks *)
2091 smu->smu_table.max_sustainable_clocks;
2092 uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
2093 uint32_t max_memory_clock = max_sustainable_clocks->uclock;
2094
 2095 if (smu->disable_uclk_switch == disable_memory_clock_switch)
2096 return 0;
2097
 2098 if (disable_memory_clock_switch)
661b94f5 2099 ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0);
6e92e156 2100 else
661b94f5 2101 ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0);
6e92e156
KF
2102
 2103 if (!ret)
2104 smu->disable_uclk_switch = disable_memory_clock_switch;
2105
2106 return ret;
2107}
2108
488f211d
EQ
2109static int navi10_get_power_limit(struct smu_context *smu,
2110 uint32_t *current_power_limit,
2111 uint32_t *default_power_limit,
2112 uint32_t *max_power_limit)
b4af964e 2113{
1e239fdd
EQ
2114 struct smu_11_0_powerplay_table *powerplay_table =
2115 (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
549db526 2116 struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
b4af964e 2117 PPTable_t *pptable = smu->smu_table.driver_pptable;
1e239fdd
EQ
2118 uint32_t power_limit, od_percent;
2119
2120 if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
2121 /* the last hope to figure out the ppt limit */
2122 if (!pptable) {
2123 dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
2124 return -EINVAL;
b4af964e 2125 }
1e239fdd
EQ
2126 power_limit =
2127 pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
2128 }
b4af964e 2129
488f211d
EQ
2130 if (current_power_limit)
2131 *current_power_limit = power_limit;
2132 if (default_power_limit)
2133 *default_power_limit = power_limit;
1e239fdd 2134
488f211d
EQ
2135 if (max_power_limit) {
2136 if (smu->od_enabled &&
2137 navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
2138 od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
1e239fdd 2139
488f211d
EQ
2140 dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
2141
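/* The max settable limit is the default limit scaled up by the OD power percentage cap. */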
2142 power_limit *= (100 + od_percent);
2143 power_limit /= 100;
2144 }
2145
2146 *max_power_limit = power_limit;
b4af964e
EQ
2147 }
2148
b4af964e
EQ
2149 return 0;
2150}
2151
372120f0
KF
2152static int navi10_update_pcie_parameters(struct smu_context *smu,
2153 uint32_t pcie_gen_cap,
2154 uint32_t pcie_width_cap)
2155{
0b590970 2156 struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
372120f0 2157 PPTable_t *pptable = smu->smu_table.driver_pptable;
372120f0 2158 uint32_t smu_pcie_arg;
0b590970 2159 int ret, i;
372120f0 2160
0b590970
EQ
2161 /* lclk dpm table setup */
2162 for (i = 0; i < MAX_PCIE_CONF; i++) {
2163 dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pptable->PcieGenSpeed[i];
2164 dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pptable->PcieLaneCount[i];
2165 }
fddbfb1c 2166
372120f0
KF
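/*
 * Message argument layout (as constructed below): bits [23:16] link level
 * index, bits [15:8] PCIe gen speed, bits [7:0] lane count, with the gen
 * and lane values capped by the platform capabilities passed in.
 */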
2167 for (i = 0; i < NUM_LINK_LEVELS; i++) {
2168 smu_pcie_arg = (i << 16) |
2169 ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
2170 (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
2171 pptable->PcieLaneCount[i] : pcie_width_cap);
66c86828 2172 ret = smu_cmn_send_smc_msg_with_param(smu,
372120f0 2173 SMU_MSG_OverridePcieParameters,
1c58267c
MC
2174 smu_pcie_arg,
2175 NULL);
fddbfb1c
KF
2176
2177 if (ret)
2178 return ret;
2179
2180 if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
2181 dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
2182 if (pptable->PcieLaneCount[i] > pcie_width_cap)
2183 dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
372120f0 2184 }
fddbfb1c
KF
2185
2186 return 0;
21677d08
MC
2187}
2188
d9811cfc
EQ
2189static inline void navi10_dump_od_table(struct smu_context *smu,
2190 OverDriveTable_t *od_table)
2191{
2192 dev_dbg(smu->adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
2193 dev_dbg(smu->adev->dev, "OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
2194 dev_dbg(smu->adev->dev, "OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
2195 dev_dbg(smu->adev->dev, "OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
2196 dev_dbg(smu->adev->dev, "OD: UclkFmax: %d\n", od_table->UclkFmax);
2197 dev_dbg(smu->adev->dev, "OD: OverDrivePct: %d\n", od_table->OverDrivePct);
21677d08
MC
2198}
2199
d9811cfc
EQ
2200static int navi10_od_setting_check_range(struct smu_context *smu,
2201 struct smu_11_0_overdrive_table *od_table,
2202 enum SMU_11_0_ODSETTING_ID setting,
2203 uint32_t value)
21677d08
MC
2204{
2205 if (value < od_table->min[setting]) {
d9811cfc 2206 dev_warn(smu->adev->dev, "OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
21677d08
MC
2207 return -EINVAL;
2208 }
2209 if (value > od_table->max[setting]) {
d9811cfc 2210 dev_warn(smu->adev->dev, "OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
21677d08
MC
2211 return -EINVAL;
2212 }
2213 return 0;
2214}
2215
0531aa6e
AD
2216static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
2217 uint16_t *voltage,
2218 uint32_t freq)
2219{
2220 uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
2221 uint32_t value = 0;
2222 int ret;
2223
66c86828 2224 ret = smu_cmn_send_smc_msg_with_param(smu,
0531aa6e 2225 SMU_MSG_GetVoltageByDpm,
1c58267c
MC
2226 param,
2227 &value);
0531aa6e 2228 if (ret) {
d9811cfc 2229 dev_err(smu->adev->dev, "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
0531aa6e
AD
2230 return ret;
2231 }
2232
0531aa6e
AD
2233 *voltage = (uint16_t)value;
2234
2235 return 0;
2236}
2237
13d75ead
EQ
2238static int navi10_baco_enter(struct smu_context *smu)
2239{
2240 struct amdgpu_device *adev = smu->adev;
2241
be68d44b
EQ
2242 /*
 2243 * This aims at the case below:
2244 * amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
2245 *
2246 * For NAVI10 and later ASICs, we rely on PMFW to handle the runpm. To
2247 * make that possible, PMFW needs to acknowledge the dstate transition
 2248 * process for both the gfx (function 0) and audio (function 1)
 2249 * functions of the ASIC.
2250 *
 2251 * The PCI device's initial runpm status is RUNPM_SUSPENDED, and so is
 2252 * that of the device representing the audio function of the ASIC. That
 2253 * means a runpm suspend can be kicked on the ASIC even if the sound
 2254 * driver (snd_hda_intel) has not been loaded yet. However, without the
 2255 * dstate transition notification from the audio function, PMFW cannot
 2256 * handle the BACO enter/exit correctly, which will cause a driver hang
 2257 * on runpm resume.
2258 *
 2259 * To address this, we revert to the legacy message way (the driver
 2260 * masters the timing for BACO enter/exit) when the sound driver is missing.
2261 */
2262 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
13d75ead
EQ
2263 return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
2264 else
2265 return smu_v11_0_baco_enter(smu);
2266}
2267
2268static int navi10_baco_exit(struct smu_context *smu)
2269{
2270 struct amdgpu_device *adev = smu->adev;
2271
be68d44b 2272 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
13d75ead
EQ
2273 /* Wait for PMFW handling for the Dstate change */
2274 msleep(10);
2275 return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
2276 } else {
2277 return smu_v11_0_baco_exit(smu);
2278 }
2279}
2280
792f80d1
EQ
2281static int navi10_set_default_od_settings(struct smu_context *smu)
2282{
2283 OverDriveTable_t *od_table =
2284 (OverDriveTable_t *)smu->smu_table.overdrive_table;
2285 OverDriveTable_t *boot_od_table =
2286 (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
92cf0508
EQ
2287 OverDriveTable_t *user_od_table =
2288 (OverDriveTable_t *)smu->smu_table.user_overdrive_table;
21677d08
MC
2289 int ret = 0;
2290
92cf0508
EQ
2291 /*
 2292 * For S3/S4/Runpm resume, there is no need to set up those overdrive tables again as
 2293 * - either they already have the default OD settings obtained during cold bootup
2294 * - or they have some user customized OD settings which cannot be overwritten
2295 */
2296 if (smu->adev->in_suspend)
2297 return 0;
2298
2299 ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)boot_od_table, false);
792f80d1 2300 if (ret) {
d9811cfc 2301 dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
21677d08 2302 return ret;
792f80d1 2303 }
21677d08 2304
92cf0508 2305 if (!boot_od_table->GfxclkVolt1) {
792f80d1 2306 ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
92cf0508
EQ
2307 &boot_od_table->GfxclkVolt1,
2308 boot_od_table->GfxclkFreq1);
792f80d1
EQ
2309 if (ret)
2310 return ret;
2311 }
21677d08 2312
92cf0508 2313 if (!boot_od_table->GfxclkVolt2) {
792f80d1 2314 ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
92cf0508
EQ
2315 &boot_od_table->GfxclkVolt2,
2316 boot_od_table->GfxclkFreq2);
792f80d1
EQ
2317 if (ret)
2318 return ret;
21677d08
MC
2319 }
2320
92cf0508 2321 if (!boot_od_table->GfxclkVolt3) {
792f80d1 2322 ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
92cf0508
EQ
2323 &boot_od_table->GfxclkVolt3,
2324 boot_od_table->GfxclkFreq3);
792f80d1
EQ
2325 if (ret)
2326 return ret;
21677d08 2327 }
372120f0 2328
92cf0508 2329 navi10_dump_od_table(smu, boot_od_table);
792f80d1 2330
92cf0508
EQ
2331 memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
2332 memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
792f80d1
EQ
2333
2334 return 0;
372120f0
KF
2335}
2336
21677d08
MC
2337static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
2338 int i;
2339 int ret = 0;
2340 struct smu_table_context *table_context = &smu->smu_table;
2341 OverDriveTable_t *od_table;
2342 struct smu_11_0_overdrive_table *od_settings;
66107132
MC
2343 enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
2344 uint16_t *freq_ptr, *voltage_ptr;
21677d08
MC
2345 od_table = (OverDriveTable_t *)table_context->overdrive_table;
2346
2347 if (!smu->od_enabled) {
d9811cfc 2348 dev_warn(smu->adev->dev, "OverDrive is not enabled!\n");
21677d08
MC
2349 return -EINVAL;
2350 }
2351
2352 if (!smu->od_settings) {
d9811cfc 2353 dev_err(smu->adev->dev, "OD board limits are not set!\n");
21677d08
MC
2354 return -ENOENT;
2355 }
2356
2357 od_settings = smu->od_settings;
2358
2359 switch (type) {
2360 case PP_OD_EDIT_SCLK_VDDC_TABLE:
e33a8cfd 2361 if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
d9811cfc 2362 dev_warn(smu->adev->dev, "GFXCLK_LIMITS not supported!\n");
21677d08
MC
2363 return -ENOTSUPP;
2364 }
2365 if (!table_context->overdrive_table) {
d9811cfc 2366 dev_err(smu->adev->dev, "Overdrive is not initialized\n");
21677d08
MC
2367 return -EINVAL;
2368 }
2369 for (i = 0; i < size; i += 2) {
2370 if (i + 2 > size) {
d9811cfc 2371 dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
21677d08
MC
2372 return -EINVAL;
2373 }
2374 switch (input[i]) {
2375 case 0:
2376 freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
2377 freq_ptr = &od_table->GfxclkFmin;
2378 if (input[i + 1] > od_table->GfxclkFmax) {
d9811cfc 2379 dev_info(smu->adev->dev, "GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
21677d08
MC
2380 input[i + 1],
 2381 od_table->GfxclkFmax);
2382 return -EINVAL;
2383 }
2384 break;
2385 case 1:
2386 freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
2387 freq_ptr = &od_table->GfxclkFmax;
2388 if (input[i + 1] < od_table->GfxclkFmin) {
d9811cfc 2389 dev_info(smu->adev->dev, "GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
21677d08
MC
2390 input[i + 1],
 2391 od_table->GfxclkFmin);
2392 return -EINVAL;
2393 }
2394 break;
2395 default:
d9811cfc
EQ
2396 dev_info(smu->adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
2397 dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
21677d08
MC
2398 return -EINVAL;
2399 }
d9811cfc 2400 ret = navi10_od_setting_check_range(smu, od_settings, freq_setting, input[i + 1]);
21677d08
MC
2401 if (ret)
2402 return ret;
2403 *freq_ptr = input[i + 1];
2404 }
2405 break;
2406 case PP_OD_EDIT_MCLK_VDDC_TABLE:
e33a8cfd 2407 if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
d9811cfc 2408 dev_warn(smu->adev->dev, "UCLK_MAX not supported!\n");
21677d08
MC
2409 return -ENOTSUPP;
2410 }
2411 if (size < 2) {
d9811cfc 2412 dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
21677d08
MC
2413 return -EINVAL;
2414 }
2415 if (input[0] != 1) {
d9811cfc
EQ
2416 dev_info(smu->adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
2417 dev_info(smu->adev->dev, "Supported indices: [1:max]\n");
21677d08
MC
2418 return -EINVAL;
2419 }
d9811cfc 2420 ret = navi10_od_setting_check_range(smu, od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
21677d08
MC
2421 if (ret)
2422 return ret;
2423 od_table->UclkFmax = input[1];
2424 break;
93c5f1f6
MC
2425 case PP_OD_RESTORE_DEFAULT_TABLE:
2426 if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
d9811cfc 2427 dev_err(smu->adev->dev, "Overdrive table was not initialized!\n");
93c5f1f6
MC
2428 return -EINVAL;
2429 }
2430 memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
2431 break;
21677d08 2432 case PP_OD_COMMIT_DPM_TABLE:
92cf0508
EQ
2433 if (memcmp(od_table, table_context->user_overdrive_table, sizeof(OverDriveTable_t))) {
2434 navi10_dump_od_table(smu, od_table);
2435 ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
2436 if (ret) {
2437 dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
2438 return ret;
2439 }
2440 memcpy(table_context->user_overdrive_table, od_table, sizeof(OverDriveTable_t));
2441 smu->user_dpm_profile.user_od = true;
2442
2443 if (!memcmp(table_context->user_overdrive_table,
2444 table_context->boot_overdrive_table,
2445 sizeof(OverDriveTable_t)))
2446 smu->user_dpm_profile.user_od = false;
21677d08 2447 }
21677d08
MC
2448 break;
2449 case PP_OD_EDIT_VDDC_CURVE:
e33a8cfd 2450 if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
d9811cfc 2451 dev_warn(smu->adev->dev, "GFXCLK_CURVE not supported!\n");
66107132
MC
2452 return -ENOTSUPP;
2453 }
2454 if (size < 3) {
d9811cfc 2455 dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
66107132
MC
2456 return -EINVAL;
2457 }
2458 if (!od_table) {
d9811cfc 2459 dev_info(smu->adev->dev, "Overdrive is not initialized\n");
66107132
MC
2460 return -EINVAL;
2461 }
2462
2463 switch (input[0]) {
2464 case 0:
2465 freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
2466 voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
2467 freq_ptr = &od_table->GfxclkFreq1;
2468 voltage_ptr = &od_table->GfxclkVolt1;
2469 break;
2470 case 1:
2471 freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
2472 voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
2473 freq_ptr = &od_table->GfxclkFreq2;
2474 voltage_ptr = &od_table->GfxclkVolt2;
2475 break;
2476 case 2:
2477 freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
2478 voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
2479 freq_ptr = &od_table->GfxclkFreq3;
2480 voltage_ptr = &od_table->GfxclkVolt3;
2481 break;
2482 default:
d9811cfc
EQ
2483 dev_info(smu->adev->dev, "Invalid VDDC_CURVE index: %ld\n", input[0]);
2484 dev_info(smu->adev->dev, "Supported indices: [0, 1, 2]\n");
66107132
MC
2485 return -EINVAL;
2486 }
d9811cfc 2487 ret = navi10_od_setting_check_range(smu, od_settings, freq_setting, input[1]);
66107132
MC
2488 if (ret)
2489 return ret;
2490 // Allow setting zero to disable the OverDrive VDDC curve
2491 if (input[2] != 0) {
d9811cfc 2492 ret = navi10_od_setting_check_range(smu, od_settings, voltage_setting, input[2]);
66107132
MC
2493 if (ret)
2494 return ret;
2495 *freq_ptr = input[1];
2496 *voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
d9811cfc 2497 dev_dbg(smu->adev->dev, "OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
66107132
MC
2498 } else {
2499 // If setting 0, disable all voltage curve settings
2500 od_table->GfxclkVolt1 = 0;
2501 od_table->GfxclkVolt2 = 0;
2502 od_table->GfxclkVolt3 = 0;
2503 }
d9811cfc 2504 navi10_dump_od_table(smu, od_table);
66107132 2505 break;
21677d08
MC
2506 default:
2507 return -ENOSYS;
2508 }
2509 return ret;
2510}
372120f0 2511
0eeaa899
EQ
2512static int navi10_run_btc(struct smu_context *smu)
2513{
2514 int ret = 0;
2515
66c86828 2516 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
0eeaa899 2517 if (ret)
d9811cfc 2518 dev_err(smu->adev->dev, "RunBtc failed!\n");
0eeaa899
EQ
2519
2520 return ret;
2521}
2522
12f04120 2523static bool navi10_need_umc_cdr_workaround(struct smu_context *smu)
31157341 2524{
eb5f69e7
EQ
2525 struct amdgpu_device *adev = smu->adev;
2526
2527 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
31157341
EQ
2528 return false;
2529
1d789535
AD
2530 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0) ||
2531 adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5))
31157341 2532 return true;
eb5f69e7
EQ
2533
2534 return false;
31157341
EQ
2535}
2536
3646c00e 2537static int navi10_umc_hybrid_cdr_workaround(struct smu_context *smu)
1cf8c930
EQ
2538{
2539 uint32_t uclk_count, uclk_min, uclk_max;
1cf8c930
EQ
2540 int ret = 0;
2541
3646c00e
EQ
2542 /* This workaround can be applied only with uclk dpm enabled */
2543 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
1cf8c930
EQ
2544 return 0;
2545
d8d3493a 2546 ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
1cf8c930
EQ
2547 if (ret)
2548 return ret;
2549
3646c00e 2550 ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
1cf8c930
EQ
2551 if (ret)
2552 return ret;
2553
3646c00e
EQ
2554 /*
 2555 * The NAVI10_UMC_HYBRID_CDR_WORKAROUND_UCLK_THRESHOLD is 750MHz.
 2556 * This workaround is needed only when the max uclk frequency is
 2557 * not greater than that.
2558 */
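/* 0x2EE == 750 (decimal), i.e. the 750MHz threshold described above. */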
2559 if (uclk_max > 0x2EE)
2560 return 0;
2561
2562 ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
1cf8c930
EQ
2563 if (ret)
2564 return ret;
2565
2566 /* Force UCLK out of the highest DPM */
661b94f5 2567 ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_min);
1cf8c930
EQ
2568 if (ret)
2569 return ret;
2570
2571 /* Revert the UCLK Hardmax */
661b94f5 2572 ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_max);
1cf8c930
EQ
2573 if (ret)
2574 return ret;
2575
2576 /*
2577 * In this case, SMU already disabled dummy pstate during enablement
 2578 * of UCLK DPM, so we have to re-enable it.
3646c00e
EQ
2579 */
2580 return smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
2581}
2582
665945eb
EQ
2583static int navi10_set_dummy_pstates_table_location(struct smu_context *smu)
2584{
2585 struct smu_table_context *smu_table = &smu->smu_table;
2586 struct smu_table *dummy_read_table =
2587 &smu_table->dummy_read_1_table;
2588 char *dummy_table = dummy_read_table->cpu_addr;
2589 int ret = 0;
2590 uint32_t i;
2591
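/*
 * Fill the 256KB (0x40000) dummy read buffer with alternating 4KB chunks
 * of the non-DBI and DBI PRBS7 patterns, then hand its DRAM address to PMFW.
 */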
2592 for (i = 0; i < 0x40000; i += 0x1000 * 2) {
2593 memcpy(dummy_table, &NoDbiPrbs7[0], 0x1000);
2594 dummy_table += 0x1000;
2595 memcpy(dummy_table, &DbiPrbs7[0], 0x1000);
2596 dummy_table += 0x1000;
2597 }
2598
2599 amdgpu_asic_flush_hdp(smu->adev, NULL);
2600
2601 ret = smu_cmn_send_smc_msg_with_param(smu,
2602 SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH,
2603 upper_32_bits(dummy_read_table->mc_address),
2604 NULL);
2605 if (ret)
2606 return ret;
2607
2608 return smu_cmn_send_smc_msg_with_param(smu,
2609 SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW,
2610 lower_32_bits(dummy_read_table->mc_address),
2611 NULL);
2612}
2613
12f04120 2614static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
3646c00e 2615{
bb7257b5
EQ
2616 struct amdgpu_device *adev = smu->adev;
2617 uint8_t umc_fw_greater_than_v136 = false;
2618 uint8_t umc_fw_disable_cdr = false;
2619 uint32_t pmfw_version;
2620 uint32_t param;
3646c00e
EQ
2621 int ret = 0;
2622
12f04120 2623 if (!navi10_need_umc_cdr_workaround(smu))
3646c00e
EQ
2624 return 0;
2625
bb7257b5
EQ
2626 ret = smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
2627 if (ret) {
2628 dev_err(adev->dev, "Failed to get smu version!\n");
3646c00e 2629 return ret;
bb7257b5 2630 }
3646c00e 2631
bb7257b5 2632 /*
b226ef95
EQ
2633 * The messages below are only supported by Navi10 42.53.0 and later
2634 * PMFWs and Navi14 53.29.0 and later PMFWs.
bb7257b5
EQ
2635 * - PPSMC_MSG_SetDriverDummyTableDramAddrHigh
2636 * - PPSMC_MSG_SetDriverDummyTableDramAddrLow
2637 * - PPSMC_MSG_GetUMCFWWA
2638 */
1d789535
AD
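/*
 * The thresholds below correspond to the versions above, packed one byte
 * per field: 0x2a3500 == 42.53.0 (Navi10) and 0x351D00 == 53.29.0 (Navi14).
 */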
2639 if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (pmfw_version >= 0x2a3500)) ||
2640 ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && (pmfw_version >= 0x351D00))) {
bb7257b5
EQ
2641 ret = smu_cmn_send_smc_msg_with_param(smu,
2642 SMU_MSG_GET_UMC_FW_WA,
2643 0,
2644 &param);
2645 if (ret)
2646 return ret;
2647
 2648 /* First bit indicates if the UMC f/w is above v136 */
2649 umc_fw_greater_than_v136 = param & 0x1;
2650
2651 /* Second bit indicates if hybrid-cdr is disabled */
2652 umc_fw_disable_cdr = param & 0x2;
3646c00e 2653
bb7257b5
EQ
2654 /* w/a only allowed if UMC f/w is <= 136 */
2655 if (umc_fw_greater_than_v136)
2656 return 0;
2657
e4912146 2658 if (umc_fw_disable_cdr) {
1d789535 2659 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
e4912146
EQ
2660 return navi10_umc_hybrid_cdr_workaround(smu);
2661 } else {
bb7257b5 2662 return navi10_set_dummy_pstates_table_location(smu);
e4912146 2663 }
bb7257b5 2664 } else {
1d789535 2665 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
bb7257b5
EQ
2666 return navi10_umc_hybrid_cdr_workaround(smu);
2667 }
2668
2669 return 0;
1cf8c930
EQ
2670}
2671
7d6c13ef
EQ
2672static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
2673 void **table)
2674{
2675 struct smu_table_context *smu_table = &smu->smu_table;
61e2d322
DN
2676 struct gpu_metrics_v1_3 *gpu_metrics =
2677 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
7d6c13ef
EQ
2678 SmuMetrics_legacy_t metrics;
2679 int ret = 0;
2680
da11407f
EQ
2681 ret = smu_cmn_get_metrics_table(smu,
2682 NULL,
2683 true);
2684 if (ret)
7d6c13ef 2685 return ret;
7d6c13ef
EQ
2686
2687 memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_legacy_t));
2688
61e2d322 2689 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
7d6c13ef
EQ
2690
2691 gpu_metrics->temperature_edge = metrics.TemperatureEdge;
2692 gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
2693 gpu_metrics->temperature_mem = metrics.TemperatureMem;
2694 gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
2695 gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
2696 gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
2697
2698 gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
2699 gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
2700
2701 gpu_metrics->average_socket_power = metrics.AverageSocketPower;
2702
2703 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
2704 gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
2705 gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
2706
2707 gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
2708 gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
2709 gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
2710 gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
2711 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
2712
2713 gpu_metrics->throttle_status = metrics.ThrottlerStatus;
64cdee43
GS
2714 gpu_metrics->indep_throttle_status =
2715 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
2716 navi1x_throttler_map);
7d6c13ef
EQ
2717
2718 gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
2719
2720 gpu_metrics->pcie_link_width =
2721 smu_v11_0_get_current_pcie_link_width(smu);
2722 gpu_metrics->pcie_link_speed =
2723 smu_v11_0_get_current_pcie_link_speed(smu);
2724
2725 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
2726
61e2d322
DN
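/*
 * Voltage telemetry is reported as an offset; the conversion below yields
 * millivolts: 1550mV minus 6.25mV per offset step (155000/100 and 625/100).
 */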
2727 if (metrics.CurrGfxVoltageOffset)
2728 gpu_metrics->voltage_gfx =
2729 (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
2730 if (metrics.CurrMemVidOffset)
2731 gpu_metrics->voltage_mem =
2732 (155000 - 625 * metrics.CurrMemVidOffset) / 100;
2733 if (metrics.CurrSocVoltageOffset)
2734 gpu_metrics->voltage_soc =
2735 (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
2736
7d6c13ef
EQ
2737 *table = (void *)gpu_metrics;
2738
61e2d322 2739 return sizeof(struct gpu_metrics_v1_3);
7d6c13ef
EQ
2740}
2741
af01340b 2742static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
ebe57d0c 2743 struct i2c_msg *msg, int num_msgs)
af01340b 2744{
2f60dd50
LT
2745 struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
2746 struct amdgpu_device *adev = smu_i2c->adev;
ebfc2533
EQ
2747 struct smu_context *smu = adev->powerplay.pp_handle;
2748 struct smu_table_context *smu_table = &smu->smu_table;
af01340b
AD
2749 struct smu_table *table = &smu_table->driver_table;
2750 SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
ebe57d0c
LT
2751 int i, j, r, c;
2752 u16 dir;
af01340b 2753
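/*
 * Build a single SwI2cRequest_t from the i2c_msg array, hand it to PMFW
 * through the SMU_TABLE_I2C_COMMANDS table, then copy any read data back
 * from the response left in the driver table.
 */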
e281d594
AD
2754 if (!adev->pm.dpm_enabled)
2755 return -EBUSY;
2756
af01340b
AD
2757 req = kzalloc(sizeof(*req), GFP_KERNEL);
2758 if (!req)
2759 return -ENOMEM;
2760
2f60dd50 2761 req->I2CcontrollerPort = smu_i2c->port;
af01340b 2762 req->I2CSpeed = I2C_SPEED_FAST_400K;
ebe57d0c
LT
2763 req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
2764 dir = msg[0].flags & I2C_M_RD;
af01340b 2765
ebe57d0c
LT
2766 for (c = i = 0; i < num_msgs; i++) {
2767 for (j = 0; j < msg[i].len; j++, c++) {
2768 SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
af01340b 2769
af01340b
AD
2770 if (!(msg[i].flags & I2C_M_RD)) {
2771 /* write */
ebe57d0c
LT
2772 cmd->Cmd = I2C_CMD_WRITE;
2773 cmd->RegisterAddr = msg[i].buf[j];
2774 }
2775
2776 if ((dir ^ msg[i].flags) & I2C_M_RD) {
2777 /* The direction changes.
2778 */
2779 dir = msg[i].flags & I2C_M_RD;
2780 cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
af01340b 2781 }
14df5650 2782
ebe57d0c
LT
2783 req->NumCmds++;
2784
14df5650
AG
2785 /*
2786 * Insert STOP if we are at the last byte of either last
2787 * message for the transaction or the client explicitly
2788 * requires a STOP at this particular message.
2789 */
ebe57d0c
LT
2790 if ((j == msg[i].len - 1) &&
2791 ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
2792 cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
af01340b 2793 cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
ebe57d0c 2794 }
af01340b
AD
2795 }
2796 }
e0638c7a 2797 mutex_lock(&adev->pm.mutex);
ebfc2533 2798 r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
e0638c7a 2799 mutex_unlock(&adev->pm.mutex);
af01340b
AD
2800 if (r)
2801 goto fail;
2802
ebe57d0c
LT
2803 for (c = i = 0; i < num_msgs; i++) {
2804 if (!(msg[i].flags & I2C_M_RD)) {
2805 c += msg[i].len;
2806 continue;
2807 }
2808 for (j = 0; j < msg[i].len; j++, c++) {
2809 SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
af01340b 2810
ebe57d0c 2811 msg[i].buf[j] = cmd->Data;
af01340b
AD
2812 }
2813 }
ebe57d0c 2814 r = num_msgs;
af01340b
AD
2815fail:
2816 kfree(req);
af01340b
AD
2817 return r;
2818}
2819
2820static u32 navi10_i2c_func(struct i2c_adapter *adap)
2821{
2822 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
2823}
2824
2825
2826static const struct i2c_algorithm navi10_i2c_algo = {
2827 .master_xfer = navi10_i2c_xfer,
2828 .functionality = navi10_i2c_func,
2829};
2830
35ed2703 2831static const struct i2c_adapter_quirks navi10_i2c_control_quirks = {
c0838d3a 2832 .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
16736627 2833 .max_read_len = MAX_SW_I2C_COMMANDS,
35ed2703 2834 .max_write_len = MAX_SW_I2C_COMMANDS,
16736627
LT
2835 .max_comb_1st_msg_len = 2,
2836 .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
35ed2703
AG
2837};
2838
2f60dd50 2839static int navi10_i2c_control_init(struct smu_context *smu)
af01340b 2840{
2f60dd50
LT
2841 struct amdgpu_device *adev = smu->adev;
2842 int res, i;
2843
2844 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
2845 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2846 struct i2c_adapter *control = &smu_i2c->adapter;
2847
2848 smu_i2c->adev = adev;
2849 smu_i2c->port = i;
2850 mutex_init(&smu_i2c->mutex);
2851 control->owner = THIS_MODULE;
2852 control->class = I2C_CLASS_HWMON;
2853 control->dev.parent = &adev->pdev->dev;
2854 control->algo = &navi10_i2c_algo;
2855 snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
2856 control->quirks = &navi10_i2c_control_quirks;
2857 i2c_set_adapdata(control, smu_i2c);
2858
2859 res = i2c_add_adapter(control);
2860 if (res) {
2861 DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
2862 goto Out_err;
2863 }
2864 }
af01340b 2865
2f60dd50
LT
2866 adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
2867 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
af01340b 2868
2f60dd50
LT
2869 return 0;
2870Out_err:
2871 for ( ; i >= 0; i--) {
2872 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2873 struct i2c_adapter *control = &smu_i2c->adapter;
af01340b 2874
2f60dd50
LT
2875 i2c_del_adapter(control);
2876 }
af01340b
AD
2877 return res;
2878}
2879
2f60dd50 2880static void navi10_i2c_control_fini(struct smu_context *smu)
af01340b 2881{
2f60dd50
LT
2882 struct amdgpu_device *adev = smu->adev;
2883 int i;
2884
2885 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
2886 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2887 struct i2c_adapter *control = &smu_i2c->adapter;
2888
2889 i2c_del_adapter(control);
2890 }
2891 adev->pm.ras_eeprom_i2c_bus = NULL;
2892 adev->pm.fru_eeprom_i2c_bus = NULL;
af01340b
AD
2893}
2894
6d4ff50a
EQ
2895static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
2896 void **table)
2897{
2898 struct smu_table_context *smu_table = &smu->smu_table;
61e2d322
DN
2899 struct gpu_metrics_v1_3 *gpu_metrics =
2900 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
6d4ff50a
EQ
2901 SmuMetrics_t metrics;
2902 int ret = 0;
2903
da11407f
EQ
2904 ret = smu_cmn_get_metrics_table(smu,
2905 NULL,
2906 true);
2907 if (ret)
6d4ff50a 2908 return ret;
6d4ff50a 2909
fceafc9b 2910 memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
7d6c13ef 2911
61e2d322 2912 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
7d6c13ef
EQ
2913
2914 gpu_metrics->temperature_edge = metrics.TemperatureEdge;
2915 gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
2916 gpu_metrics->temperature_mem = metrics.TemperatureMem;
2917 gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
2918 gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
2919 gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
2920
2921 gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
2922 gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
2923
2924 gpu_metrics->average_socket_power = metrics.AverageSocketPower;
2925
2926 if (metrics.AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
2927 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs;
2928 else
2929 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs;
2930
2931 gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
2932 gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs;
2933
2934 gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
2935 gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
2936 gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
2937 gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
2938 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
2939
2940 gpu_metrics->throttle_status = metrics.ThrottlerStatus;
64cdee43
GS
2941 gpu_metrics->indep_throttle_status =
2942 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
2943 navi1x_throttler_map);
7d6c13ef
EQ
2944
2945 gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
2946
c524c1c9
EQ
2947 gpu_metrics->pcie_link_width = metrics.PcieWidth;
2948 gpu_metrics->pcie_link_speed = link_speed[metrics.PcieRate];
7d6c13ef
EQ
2949
2950 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
2951
61e2d322
DN
2952 if (metrics.CurrGfxVoltageOffset)
2953 gpu_metrics->voltage_gfx =
2954 (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
2955 if (metrics.CurrMemVidOffset)
2956 gpu_metrics->voltage_mem =
2957 (155000 - 625 * metrics.CurrMemVidOffset) / 100;
2958 if (metrics.CurrSocVoltageOffset)
2959 gpu_metrics->voltage_soc =
2960 (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
2961
7d6c13ef
EQ
2962 *table = (void *)gpu_metrics;
2963
61e2d322 2964 return sizeof(struct gpu_metrics_v1_3);
7d6c13ef
EQ
2965}
2966
2967static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
2968 void **table)
2969{
2970 struct smu_table_context *smu_table = &smu->smu_table;
61e2d322
DN
2971 struct gpu_metrics_v1_3 *gpu_metrics =
2972 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
7d6c13ef
EQ
2973 SmuMetrics_NV12_legacy_t metrics;
2974 int ret = 0;
2975
da11407f
EQ
2976 ret = smu_cmn_get_metrics_table(smu,
2977 NULL,
2978 true);
2979 if (ret)
7d6c13ef 2980 return ret;
7d6c13ef
EQ
2981
2982 memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_legacy_t));
6d4ff50a 2983
61e2d322 2984 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
6d4ff50a
EQ
2985
2986 gpu_metrics->temperature_edge = metrics.TemperatureEdge;
2987 gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
2988 gpu_metrics->temperature_mem = metrics.TemperatureMem;
2989 gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
2990 gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
2991 gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
2992
2993 gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
2994 gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
2995
2996 gpu_metrics->average_socket_power = metrics.AverageSocketPower;
2997
2998 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
2999 gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
3000 gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
3001
7d6c13ef
EQ
3002 gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
3003 gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency;
3004 gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency;
3005 gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
3006
3007 gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
3008 gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
3009 gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
3010 gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
3011 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
3012
3013 gpu_metrics->throttle_status = metrics.ThrottlerStatus;
64cdee43
GS
3014 gpu_metrics->indep_throttle_status =
3015 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
3016 navi1x_throttler_map);
7d6c13ef
EQ
3017
3018 gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
3019
3020 gpu_metrics->pcie_link_width =
3021 smu_v11_0_get_current_pcie_link_width(smu);
3022 gpu_metrics->pcie_link_speed =
3023 smu_v11_0_get_current_pcie_link_speed(smu);
3024
3025 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
3026
61e2d322
DN
3027 if (metrics.CurrGfxVoltageOffset)
3028 gpu_metrics->voltage_gfx =
3029 (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
3030 if (metrics.CurrMemVidOffset)
3031 gpu_metrics->voltage_mem =
3032 (155000 - 625 * metrics.CurrMemVidOffset) / 100;
3033 if (metrics.CurrSocVoltageOffset)
3034 gpu_metrics->voltage_soc =
3035 (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
3036
7d6c13ef
EQ
3037 *table = (void *)gpu_metrics;
3038
61e2d322 3039 return sizeof(struct gpu_metrics_v1_3);
7d6c13ef
EQ
3040}
3041
3042static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
3043 void **table)
3044{
3045 struct smu_table_context *smu_table = &smu->smu_table;
61e2d322
DN
3046 struct gpu_metrics_v1_3 *gpu_metrics =
3047 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
7d6c13ef
EQ
3048 SmuMetrics_NV12_t metrics;
3049 int ret = 0;
3050
da11407f
EQ
3051 ret = smu_cmn_get_metrics_table(smu,
3052 NULL,
3053 true);
3054 if (ret)
7d6c13ef 3055 return ret;
6d4ff50a 3056
7d6c13ef
EQ
3057 memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));
3058
61e2d322 3059 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
7d6c13ef
EQ
3060
3061 gpu_metrics->temperature_edge = metrics.TemperatureEdge;
3062 gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
3063 gpu_metrics->temperature_mem = metrics.TemperatureMem;
3064 gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
3065 gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
3066 gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
3067
3068 gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
3069 gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
3070
3071 gpu_metrics->average_socket_power = metrics.AverageSocketPower;
3072
3073 if (metrics.AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
3074 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs;
3075 else
3076 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs;
3077
3078 gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
3079 gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs;
3080
3081 gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
3082 gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency;
3083 gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency;
3084 gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
3085
6d4ff50a
EQ
3086 gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
3087 gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
3088 gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
3089 gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
3090 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
3091
3092 gpu_metrics->throttle_status = metrics.ThrottlerStatus;
64cdee43
GS
3093 gpu_metrics->indep_throttle_status =
3094 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
3095 navi1x_throttler_map);
6d4ff50a
EQ
3096
3097 gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
3098
c524c1c9
EQ
3099 gpu_metrics->pcie_link_width = metrics.PcieWidth;
3100 gpu_metrics->pcie_link_speed = link_speed[metrics.PcieRate];
6d4ff50a 3101
de4b7cd8
KW
3102 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
3103
61e2d322
DN
3104 if (metrics.CurrGfxVoltageOffset)
3105 gpu_metrics->voltage_gfx =
3106 (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
3107 if (metrics.CurrMemVidOffset)
3108 gpu_metrics->voltage_mem =
3109 (155000 - 625 * metrics.CurrMemVidOffset) / 100;
3110 if (metrics.CurrSocVoltageOffset)
3111 gpu_metrics->voltage_soc =
3112 (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
3113
6d4ff50a
EQ
3114 *table = (void *)gpu_metrics;
3115
61e2d322 3116 return sizeof(struct gpu_metrics_v1_3);
6d4ff50a 3117}
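/*
 * Editor's note (illustrative sketch, not from the original file): unlike the
 * Navi10 path above, which reads the current PCIe link width/speed through the
 * smu_v11_0 helpers, the Navi12 metrics table reports PcieWidth directly and a
 * PcieRate index that is translated through the file-local link_speed[] table.
 * Assuming that table maps the rate index to link speeds in units of 0.1 GT/s
 * (e.g. 2.5/5.0/8.0/16.0 GT/s), a bounds-checked lookup would look roughly
 * like the hypothetical helper below:
 */
static inline uint16_t navi12_pcie_rate_to_speed(uint8_t rate)
{
	/* assumed mapping: PMFW rate index -> link speed in 0.1 GT/s */
	static const uint16_t rate_to_dgts[] = { 25, 50, 80, 160 };

	return rate < ARRAY_SIZE(rate_to_dgts) ? rate_to_dgts[rate] : 0;
}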
1bc73475 3118
7d6c13ef
EQ
3119static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
3120 void **table)
3121{
3122 struct amdgpu_device *adev = smu->adev;
3123 uint32_t smu_version;
3124 int ret = 0;
3125
3126 ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
3127 if (ret) {
3128 dev_err(adev->dev, "Failed to get smu version!\n");
3129 return ret;
3130 }
3131
1d789535 3132 switch (adev->ip_versions[MP1_HWIP][0]) {
ea0d730a 3133 case IP_VERSION(11, 0, 9):
7d6c13ef
EQ
3134 if (smu_version > 0x00341C00)
3135 ret = navi12_get_gpu_metrics(smu, table);
3136 else
3137 ret = navi12_get_legacy_gpu_metrics(smu, table);
3138 break;
ea0d730a
AD
3139 case IP_VERSION(11, 0, 0):
3140 case IP_VERSION(11, 0, 5):
7d6c13ef 3141 default:
1d789535
AD
3142 if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
3143 ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
7d6c13ef
EQ
3144 ret = navi10_get_gpu_metrics(smu, table);
3145 else
3146 			ret = navi10_get_legacy_gpu_metrics(smu, table);
3147 break;
3148 }
3149
3150 return ret;
3151}
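/*
 * Editor's note (illustrative, not part of the original file): the raw
 * smu_version compared against 0x00341C00/0x00351F00/0x002A3B00 above is a
 * packed value; elsewhere in the swSMU code it is decoded as major/minor/debug
 * bytes, so 0x00341C00 reads as PMFW 52.28.0 and 0x002A3B00 as 42.59.0.
 * A minimal sketch of that decode, assuming the same byte layout (the helper
 * name is hypothetical):
 */
static inline void navi1x_decode_smu_version(uint32_t smu_version,
					     uint8_t *major, uint8_t *minor,
					     uint8_t *debug)
{
	*major = (smu_version >> 16) & 0xff;
	*minor = (smu_version >> 8) & 0xff;
	*debug = smu_version & 0xff;
}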
3152
94a670d5
EQ
3153static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
3154{
b804a75d
EQ
3155 struct smu_table_context *table_context = &smu->smu_table;
3156 PPTable_t *smc_pptable = table_context->driver_pptable;
94a670d5
EQ
3157 struct amdgpu_device *adev = smu->adev;
3158 uint32_t param = 0;
3159
3160 /* Navi12 does not support this */
1d789535 3161 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9))
94a670d5
EQ
3162 return 0;
3163
b804a75d
EQ
3164 /*
3165 * Skip the MGpuFanBoost setting for those ASICs
3166 * which do not support it
3167 */
3168 if (!smc_pptable->MGpuFanBoostLimitRpm)
3169 return 0;
3170
94a670d5
EQ
3171 /* Workaround for WS SKU */
3172 if (adev->pdev->device == 0x7312 &&
3173 adev->pdev->revision == 0)
3174 param = 0xD188;
3175
3176 return smu_cmn_send_smc_msg_with_param(smu,
3177 SMU_MSG_SetMGpuFanBoostLimitRpm,
3178 param,
3179 NULL);
3180}
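/*
 * Editor's note (illustrative sketch, not from the original file): this
 * callback is only reached through the generic swSMU layer, which dispatches
 * via smu->ppt_funcs. The param of 0 sent on non-workstation SKUs presumably
 * lets PMFW fall back to the MGpuFanBoostLimitRpm value from the pptable.
 * A caller written against that interface could look roughly like the
 * hypothetical helper below:
 */
static int example_enable_mgpu_fan_boost(struct smu_context *smu)
{
	if (!smu->ppt_funcs || !smu->ppt_funcs->enable_mgpu_fan_boost)
		return 0;	/* treated as unsupported: nothing to do */

	return smu->ppt_funcs->enable_mgpu_fan_boost(smu);
}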
3181
10144762
EQ
3182static int navi10_post_smu_init(struct smu_context *smu)
3183{
10144762 3184 struct amdgpu_device *adev = smu->adev;
82cac71c 3185 int ret = 0;
10144762 3186
911d5bd5
JC
3187 if (amdgpu_sriov_vf(adev))
3188 return 0;
3189
12f04120 3190 ret = navi10_run_umc_cdr_workaround(smu);
1653a179 3191 if (ret) {
82cac71c 3192 		dev_err(adev->dev, "Failed to apply UMC CDR workaround!\n");
1653a179
EQ
3193 return ret;
3194 }
3195
3196 if (!smu->dc_controlled_by_gpio) {
3197 /*
3198 * For Navi1X, manually switch it to AC mode as PMFW
3199 		 * may boot it in DC mode.
3200 */
3201 ret = smu_v11_0_set_power_source(smu,
3202 adev->pm.ac_power ?
3203 SMU_POWER_SOURCE_AC :
3204 SMU_POWER_SOURCE_DC);
3205 if (ret) {
3206 dev_err(adev->dev, "Failed to switch to %s mode!\n",
3207 adev->pm.ac_power ? "AC" : "DC");
3208 return ret;
3209 }
3210 }
82cac71c
EQ
3211
3212 return ret;
10144762
EQ
3213}
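/*
 * Editor's note (illustrative, not part of the original file): when the AC/DC
 * source is not signalled through a GPIO, later power-source changes also have
 * to be pushed to PMFW by the driver. A minimal sketch of such a notification
 * path, reusing the smu_v11_0_set_power_source helper called above (the
 * function name below is hypothetical):
 */
static int example_notify_power_source(struct smu_context *smu, bool ac_power)
{
	if (smu->dc_controlled_by_gpio)
		return 0;	/* PMFW follows the GPIO on its own */

	return smu_v11_0_set_power_source(smu,
					  ac_power ? SMU_POWER_SOURCE_AC :
						     SMU_POWER_SOURCE_DC);
}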
3214
b3490673 3215static const struct pptable_funcs navi10_ppt_funcs = {
74c958a3 3216 .get_allowed_feature_mask = navi10_get_allowed_feature_mask,
b3490673 3217 .set_default_dpm_table = navi10_set_default_dpm_table,
f6b4b4a1 3218 .dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
43717ff6 3219 .dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
af01340b
AD
3220 .i2c_init = navi10_i2c_control_init,
3221 .i2c_fini = navi10_i2c_control_fini,
b1e7e224 3222 .print_clk_levels = navi10_print_clk_levels,
db439ca2 3223 .force_clk_levels = navi10_force_clk_levels,
fa51bfc2 3224 .populate_umd_state_clk = navi10_populate_umd_state_clk,
a43913ea 3225 .get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
28430544 3226 .pre_display_config_changed = navi10_pre_display_config_changed,
0a6430da 3227 .display_config_changed = navi10_display_config_changed,
19796597 3228 .notify_smc_display_config = navi10_notify_smc_display_config,
4228b601 3229 .is_dpm_running = navi10_is_dpm_running,
0d8318e1 3230 .get_fan_speed_pwm = smu_v11_0_get_fan_speed_pwm,
d9ca7567 3231 .get_fan_speed_rpm = navi10_get_fan_speed_rpm,
b45dc20b
KW
3232 .get_power_profile_mode = navi10_get_power_profile_mode,
3233 .set_power_profile_mode = navi10_set_power_profile_mode,
5bbb0994 3234 .set_watermarks_table = navi10_set_watermarks_table,
9c62f993 3235 .read_sensor = navi10_read_sensor,
f4b3295f 3236 .get_uclk_dpm_states = navi10_get_uclk_dpm_states,
46a301e1 3237 .set_performance_level = smu_v11_0_set_performance_level,
7a816371 3238 .get_thermal_temperature_range = navi10_get_thermal_temperature_range,
6e92e156 3239 .display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
b4af964e 3240 .get_power_limit = navi10_get_power_limit,
372120f0 3241 .update_pcie_parameters = navi10_update_pcie_parameters,
6c45e480
EQ
3242 .init_microcode = smu_v11_0_init_microcode,
3243 .load_microcode = smu_v11_0_load_microcode,
6f47116e 3244 .fini_microcode = smu_v11_0_fini_microcode,
c1b353b7 3245 .init_smc_tables = navi10_init_smc_tables,
6c45e480
EQ
3246 .fini_smc_tables = smu_v11_0_fini_smc_tables,
3247 .init_power = smu_v11_0_init_power,
3248 .fini_power = smu_v11_0_fini_power,
3249 .check_fw_status = smu_v11_0_check_fw_status,
4a13b4ce 3250 .setup_pptable = navi10_setup_pptable,
6c45e480 3251 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
6c45e480 3252 .check_fw_version = smu_v11_0_check_fw_version,
caad2613 3253 .write_pptable = smu_cmn_write_pptable,
ce0d0ec3 3254 .set_driver_table_location = smu_v11_0_set_driver_table_location,
6c45e480
EQ
3255 .set_tool_table_location = smu_v11_0_set_tool_table_location,
3256 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
3257 .system_features_control = smu_v11_0_system_features_control,
66c86828
EQ
3258 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
3259 .send_smc_msg = smu_cmn_send_smc_msg,
6c45e480
EQ
3260 .init_display_count = smu_v11_0_init_display_count,
3261 .set_allowed_mask = smu_v11_0_set_allowed_mask,
28251d72 3262 .get_enabled_mask = smu_cmn_get_enabled_mask,
b4bb3aaf 3263 .feature_is_enabled = smu_cmn_feature_is_enabled,
af5ba6d2 3264 .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
6c45e480
EQ
3265 .notify_display_change = smu_v11_0_notify_display_change,
3266 .set_power_limit = smu_v11_0_set_power_limit,
6c45e480 3267 .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
22f1e0e8
EQ
3268 .enable_thermal_alert = smu_v11_0_enable_thermal_alert,
3269 .disable_thermal_alert = smu_v11_0_disable_thermal_alert,
ce63d8f8 3270 .set_min_dcef_deep_sleep = smu_v11_0_set_min_deep_sleep_dcefclk,
6c45e480
EQ
3271 .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
3272 .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
3273 .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
0d8318e1 3274 .set_fan_speed_pwm = smu_v11_0_set_fan_speed_pwm,
f3289d04 3275 .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
6c45e480
EQ
3276 .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
3277 .gfx_off_control = smu_v11_0_gfx_off_control,
3278 .register_irq_handler = smu_v11_0_register_irq_handler,
3279 .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
3280 .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
9fd4781b 3281 .baco_is_support = smu_v11_0_baco_is_support,
6c45e480
EQ
3282 .baco_get_state = smu_v11_0_baco_get_state,
3283 .baco_set_state = smu_v11_0_baco_set_state,
13d75ead
EQ
3284 .baco_enter = navi10_baco_enter,
3285 .baco_exit = navi10_baco_exit,
6c45e480
EQ
3286 .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
3287 .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
21677d08
MC
3288 .set_default_od_settings = navi10_set_default_od_settings,
3289 .od_edit_dpm_table = navi10_od_edit_dpm_table,
92cf0508 3290 .restore_user_od_settings = smu_v11_0_restore_user_od_settings,
0eeaa899 3291 .run_btc = navi10_run_btc,
fa34520c 3292 .set_power_source = smu_v11_0_set_power_source,
7dbf7805
EQ
3293 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
3294 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
7d6c13ef 3295 .get_gpu_metrics = navi1x_get_gpu_metrics,
94a670d5 3296 .enable_mgpu_fan_boost = navi10_enable_mgpu_fan_boost,
e988026f 3297 .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
5ce99853 3298 .deep_sleep_control = smu_v11_0_deep_sleep_control,
3204ff3e 3299 .get_fan_parameters = navi10_get_fan_parameters,
10144762 3300 .post_init = navi10_post_smu_init,
234676d6 3301 .interrupt_work = smu_v11_0_interrupt_work,
5f0f1727 3302 .set_mp1_state = smu_cmn_set_mp1_state,
b3490673
HR
3303};
3304
3305void navi10_set_ppt_funcs(struct smu_context *smu)
3306{
3307 smu->ppt_funcs = &navi10_ppt_funcs;
6c339f37
EQ
3308 smu->message_map = navi10_message_map;
3309 smu->clock_map = navi10_clk_map;
3310 smu->feature_map = navi10_feature_mask_map;
3311 smu->table_map = navi10_table_map;
3312 smu->pwr_src_map = navi10_pwr_src_map;
3313 smu->workload_map = navi10_workload_map;
b3490673 3314}
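/*
 * Editor's note (illustrative sketch, not part of the original file): the maps
 * installed here are consumed by the common swSMU code, which translates
 * generic enums (messages, clocks, features, tables, power sources, workloads)
 * into the Navi1x-specific indices before talking to PMFW. For example, a
 * generic message send goes through navi10_message_map, roughly as in the
 * hypothetical snippet below:
 */
static int example_send_test_message(struct smu_context *smu)
{
	/* navi10_message_map resolves SMU_MSG_TestMessage -> PPSMC_MSG_TestMessage */
	return smu_cmn_send_smc_msg(smu, SMU_MSG_TestMessage, NULL);
}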