/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0_6_pmfw.h"
#include "smu13_driver_if_v13_0_6.h"
#include "smu_v13_0_6_ppsmc.h"
#include "soc15_common.h"
#include "atom.h"
#include "power_state.h"
#include "smu_v13_0.h"
#include "smu_v13_0_6_ppt.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "thm/thm_11_0_2_offset.h"
#include "thm/thm_11_0_2_sh_mask.h"
#include "amdgpu_xgmi.h"
#include <linux/pci.h>
#include "amdgpu_ras.h"
#include "smu_cmn.h"
#include "mp/mp_13_0_6_offset.h"
#include "mp/mp_13_0_6_sh_mask.h"

#undef MP1_Public
#undef smnMP1_FIRMWARE_FLAGS

/* TODO: Check final register offsets */
#define MP1_Public 0x03b00000
#define smnMP1_FIRMWARE_FLAGS 0x3010028
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
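/*
 * For example (illustrative only):
 *   dev_warn(smu->adev->dev, "thermal throttling detected\n");
 * tags the message with the PCI device name, so logs from multiple GPUs
 * in one system remain distinguishable, unlike a bare pr_warn().
 */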

#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature) \
	[smu_feature] = { 1, (smu_13_0_6_feature) }

#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE \
	(FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
	 FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) | \
	 FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) | \
	 FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) | \
	 FEATURE_MASK(FEATURE_DPM_VCN))
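/*
 * Illustration: each FEATURE_* enum value from smu_v13_0_6_pmfw.h names a
 * bit position, so FEATURE_MASK() turns it into a 64-bit mask; e.g. if
 * FEATURE_DPM_GFXCLK were bit 1, FEATURE_MASK(FEATURE_DPM_GFXCLK) would be
 * 0x2ULL. SMC_DPM_FEATURE ORs the DPM-related bits into one mask that
 * smu_v13_0_6_is_dpm_running() tests against the enabled-feature mask.
 */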

/* possible frequency drift (1 MHz) */
#define EPSILON 1
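/*
 * Example: with EPSILON = 1, smu_v13_0_6_freqs_in_same_level() treats a
 * measured 1299 MHz as matching a 1300 MHz DPM level when marking the
 * active level in sysfs output.
 */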

#define smnPCIE_ESM_CTRL 0x193D0
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1ab40288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4

static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
	MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1),
	MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1),
	MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
	MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
	MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
	MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
	MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
	MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
	MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
	MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
	MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 0),
	MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 0),
	MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
	MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
	MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
	MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0),
	MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
	MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
	MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
	MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
	MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0),
	MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
	MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0),
	MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0),
	MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0),
	MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0),
	MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0),
	MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
	MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
	MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
	MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 0),
	MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 0),
	MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
	MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
};

static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_FCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(DCLK, PPCLK_DCLK),
	CLK_MAP(VCLK, PPCLK_VCLK),
	CLK_MAP(LCLK, PPCLK_LCLK),
};

static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_COUNT] = {
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_DPM_UCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, FEATURE_DPM_SOCCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, FEATURE_DPM_LCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_VCLK_BIT, FEATURE_DPM_VCN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_DCLK_BIT, FEATURE_DPM_VCN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, FEATURE_DPM_XGMI),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_GFXOFF_BIT, FEATURE_GFXOFF),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
};

#define TABLE_PMSTATUSLOG 0
#define TABLE_SMU_METRICS 1
#define TABLE_I2C_COMMANDS 2
#define TABLE_COUNT 3

static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(I2C_COMMANDS),
};

static const uint8_t smu_v13_0_6_throttler_map[] = {
	[THROTTLER_PPT_BIT] = (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_THERMAL_SOCKET_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_THERMAL_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_THERMAL_VR_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
};
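/*
 * Example: when PMFW reports THROTTLER_PPT_BIT, this table lets
 * smu_cmn_get_indep_throttler_status() translate it to the ASIC-independent
 * SMU_THROTTLER_PPT0_BIT before the event is forwarded to KFD SMI clients
 * in smu_v13_0_6_log_thermal_throttling_event().
 */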

struct PPTable_t {
	uint32_t MaxSocketPowerLimit;
	uint32_t MaxGfxclkFrequency;
	uint32_t MinGfxclkFrequency;
	uint32_t FclkFrequencyTable[4];
	uint32_t UclkFrequencyTable[4];
	uint32_t SocclkFrequencyTable[4];
	uint32_t VclkFrequencyTable[4];
	uint32_t DclkFrequencyTable[4];
	uint32_t LclkFrequencyTable[4];
	uint32_t MaxLclkDpmRange;
	uint32_t MinLclkDpmRange;
	uint64_t PublicSerialNumber_AID;
	bool Init;
};

#define SMUQ10_TO_UINT(x) ((x) >> 10)
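/*
 * PMFW metrics are Q10 fixed point (the low 10 bits are the fraction).
 * Worked example with an illustrative value: 1642496 (0x191000) >> 10 = 1604,
 * i.e. 1604 MHz for a frequency field.
 */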

struct smu_v13_0_6_dpm_map {
	enum smu_clk_type clk_type;
	uint32_t feature_num;
	struct smu_13_0_dpm_table *dpm_table;
	uint32_t *freq_table;
};

static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct amdgpu_device *adev = smu->adev;

	if (!(adev->flags & AMD_IS_APU))
		SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
		       PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);

	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);

	smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		return -ENOMEM;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table =
		kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table) {
		kfree(smu_table->metrics_table);
		return -ENOMEM;
	}

	smu_table->driver_pptable =
		kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		kfree(smu_table->metrics_table);
		kfree(smu_table->gpu_metrics_table);
		return -ENOMEM;
	}

	return 0;
}

static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context =
		kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;
	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);

	return 0;
}

static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v13_0_6_tables_init(smu);
	if (ret)
		return ret;

	ret = smu_v13_0_6_allocate_dpm_context(smu);

	return ret;
}

static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
						uint32_t *feature_mask,
						uint32_t num)
{
	if (num > 2)
		return -EINVAL;

	/* pptable will handle the features to enable */
	memset(feature_mask, 0xFF, sizeof(uint32_t) * num);

	return 0;
}

static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
					 void *metrics_table, bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	struct smu_table *table = &smu_table->driver_table;
	int ret;

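	/*
	 * Serve from the cached copy unless the caller bypasses the cache or
	 * the snapshot is stale (older than 1 ms); only then ask PMFW to
	 * refresh the metrics table in the driver table buffer.
	 */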
	if (bypass_cache || !smu_table->metrics_time ||
	    time_after(jiffies,
		       smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
		if (ret) {
			dev_info(smu->adev->dev,
				 "Failed to export SMU metrics table!\n");
			return ret;
		}

		amdgpu_asic_invalidate_hdp(smu->adev, NULL);
		memcpy(smu_table->metrics_table, table->cpu_addr, table_size);

		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	int ret;
	int i;

	/* Store one-time values in driver PPTable */
	if (!pptable->Init) {
		ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
		if (ret)
			return ret;

		pptable->MaxSocketPowerLimit =
			SMUQ10_TO_UINT(metrics->MaxSocketPowerLimit);
		pptable->MaxGfxclkFrequency =
			SMUQ10_TO_UINT(metrics->MaxGfxclkFrequency);
		pptable->MinGfxclkFrequency =
			SMUQ10_TO_UINT(metrics->MinGfxclkFrequency);

		for (i = 0; i < 4; ++i) {
			pptable->FclkFrequencyTable[i] =
				SMUQ10_TO_UINT(metrics->FclkFrequencyTable[i]);
			pptable->UclkFrequencyTable[i] =
				SMUQ10_TO_UINT(metrics->UclkFrequencyTable[i]);
			pptable->SocclkFrequencyTable[i] = SMUQ10_TO_UINT(
				metrics->SocclkFrequencyTable[i]);
			pptable->VclkFrequencyTable[i] =
				SMUQ10_TO_UINT(metrics->VclkFrequencyTable[i]);
			pptable->DclkFrequencyTable[i] =
				SMUQ10_TO_UINT(metrics->DclkFrequencyTable[i]);
			pptable->LclkFrequencyTable[i] =
				SMUQ10_TO_UINT(metrics->LclkFrequencyTable[i]);
		}

		/* use AID0 serial number by default */
		pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0];

		pptable->Init = true;
	}

	return 0;
}

static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min, uint32_t *max)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	uint32_t clock_limit = 0, param;
	int ret = 0, clk_id = 0;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			if (pptable->Init)
				clock_limit = pptable->UclkFrequencyTable[0];
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			if (pptable->Init)
				clock_limit = pptable->MinGfxclkFrequency;
			break;
		case SMU_SOCCLK:
			if (pptable->Init)
				clock_limit = pptable->SocclkFrequencyTable[0];
			break;
		case SMU_FCLK:
			if (pptable->Init)
				clock_limit = pptable->FclkFrequencyTable[0];
			break;
		case SMU_VCLK:
			if (pptable->Init)
				clock_limit = pptable->VclkFrequencyTable[0];
			break;
		case SMU_DCLK:
			if (pptable->Init)
				clock_limit = pptable->DclkFrequencyTable[0];
			break;
		default:
			break;
		}

		if (min)
			*min = clock_limit;

		if (max)
			*max = clock_limit;

		return 0;
	}

	if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
		clk_id = smu_cmn_to_asic_specific_index(
			smu, CMN2ASIC_MAPPING_CLK, clk_type);
		if (clk_id < 0) {
			ret = -EINVAL;
			goto failed;
		}
		param = (clk_id & 0xffff) << 16;
	}

	if (max) {
		if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
			ret = smu_cmn_send_smc_msg(
				smu, SMU_MSG_GetMaxGfxclkFrequency, max);
		else
			ret = smu_cmn_send_smc_msg_with_param(
				smu, SMU_MSG_GetMaxDpmFreq, param, max);
		if (ret)
			goto failed;
	}

	if (min) {
		if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
			ret = smu_cmn_send_smc_msg(
				smu, SMU_MSG_GetMinGfxclkFrequency, min);
		else
			ret = smu_cmn_send_smc_msg_with_param(
				smu, SMU_MSG_GetMinDpmFreq, param, min);
	}

failed:
	return ret;
}

static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu,
					   enum smu_clk_type clk_type,
					   uint32_t *levels)
{
	int ret;

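	/*
	 * Querying index 0xff returns the highest DPM level index for the
	 * clock (as implied by the increment below); adding one converts
	 * that index into a level count.
	 */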
	ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, levels);
	if (!ret)
		++(*levels);

	return ret;
}

static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_13_0_dpm_table *dpm_table = NULL;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	uint32_t gfxclkmin, gfxclkmax, levels;
	int ret = 0, i, j;
	struct smu_v13_0_6_dpm_map dpm_map[] = {
		{ SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT,
		  &dpm_context->dpm_tables.soc_table,
		  pptable->SocclkFrequencyTable },
		{ SMU_UCLK, SMU_FEATURE_DPM_UCLK_BIT,
		  &dpm_context->dpm_tables.uclk_table,
		  pptable->UclkFrequencyTable },
		{ SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT,
		  &dpm_context->dpm_tables.fclk_table,
		  pptable->FclkFrequencyTable },
		{ SMU_VCLK, SMU_FEATURE_DPM_VCLK_BIT,
		  &dpm_context->dpm_tables.vclk_table,
		  pptable->VclkFrequencyTable },
		{ SMU_DCLK, SMU_FEATURE_DPM_DCLK_BIT,
		  &dpm_context->dpm_tables.dclk_table,
		  pptable->DclkFrequencyTable },
	};

	smu_v13_0_6_setup_driver_pptable(smu);

	/* gfxclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.gfx_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
		/* In the case of gfxclk, only fine-grained dpm is honored.
		 * Get min/max values from FW.
		 */
		ret = smu_v13_0_6_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
							&gfxclkmin, &gfxclkmax);
		if (ret)
			return ret;

		dpm_table->count = 2;
		dpm_table->dpm_levels[0].value = gfxclkmin;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->dpm_levels[1].value = gfxclkmax;
		dpm_table->dpm_levels[1].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[1].value;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	for (j = 0; j < ARRAY_SIZE(dpm_map); j++) {
		dpm_table = dpm_map[j].dpm_table;
		levels = 1;
		if (smu_cmn_feature_is_enabled(smu, dpm_map[j].feature_num)) {
			ret = smu_v13_0_6_get_dpm_level_count(
				smu, dpm_map[j].clk_type, &levels);
			if (ret)
				return ret;
		}
		dpm_table->count = levels;
		for (i = 0; i < dpm_table->count; ++i) {
			dpm_table->dpm_levels[i].value =
				dpm_map[j].freq_table[i];
			dpm_table->dpm_levels[i].enabled = true;
		}
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[levels - 1].value;
	}

	return 0;
}

static int smu_v13_0_6_setup_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	/* TODO: PPTable is not available.
	 * 1) Find an alternate way to get 'PPTable values' here.
	 * 2) Check if there is SW CTF
	 */
	table_context->thermal_controller_type = 0;

	return 0;
}

static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags =
		RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	pstate_table->gfxclk_pstate.peak = gfx_table->max;
	pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
	pstate_table->gfxclk_pstate.curr.max = gfx_table->max;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;
	pstate_table->uclk_pstate.curr.min = mem_table->min;
	pstate_table->uclk_pstate.curr.max = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;
	pstate_table->socclk_pstate.curr.min = soc_table->min;
	pstate_table->socclk_pstate.curr.max = soc_table->max;

	if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
	    soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
		pstate_table->gfxclk_pstate.standard =
			gfx_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL].value;
		pstate_table->uclk_pstate.standard =
			mem_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL].value;
		pstate_table->socclk_pstate.standard =
			soc_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL].value;
	} else {
		pstate_table->gfxclk_pstate.standard =
			pstate_table->gfxclk_pstate.min;
		pstate_table->uclk_pstate.standard =
			pstate_table->uclk_pstate.min;
		pstate_table->socclk_pstate.standard =
			pstate_table->socclk_pstate.min;
	}

	return 0;
}

static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
				     struct pp_clock_levels_with_latency *clocks,
				     struct smu_13_0_dpm_table *dpm_table)
{
	int i, count;

	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS :
						      dpm_table->count;
	clocks->num_levels = count;

	for (i = 0; i < count; i++) {
		clocks->data[i].clocks_in_khz =
			dpm_table->dpm_levels[i].value * 1000;
		clocks->data[i].latency_in_us = 0;
	}

	return 0;
}

static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1,
					   int32_t frequency2)
{
	return (abs(frequency1 - frequency2) <= EPSILON);
}

static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_13_0_power_context *power_context = smu_power->power_context;
	uint32_t throttler_status = 0;

	throttler_status = atomic_read(&power_context->throttle_status);
	dev_dbg(smu->adev->dev, "SMU Throttler status: %u", throttler_status);

	return throttler_status;
}

static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t smu_version;
	int ret = 0;
	int xcc_id;

	ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
	if (ret)
		return ret;

	/* For clocks with multiple instances, only report the first one */
	switch (member) {
	case METRICS_CURR_GFXCLK:
	case METRICS_AVERAGE_GFXCLK:
		smu_cmn_get_smc_version(smu, NULL, &smu_version);
		if (smu_version >= 0x552F00) {
			xcc_id = GET_INST(GC, 0);
			*value = SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc_id]);
		} else {
			*value = 0;
		}
		break;
	case METRICS_CURR_SOCCLK:
	case METRICS_AVERAGE_SOCCLK:
		*value = SMUQ10_TO_UINT(metrics->SocclkFrequency[0]);
		break;
	case METRICS_CURR_UCLK:
	case METRICS_AVERAGE_UCLK:
		*value = SMUQ10_TO_UINT(metrics->UclkFrequency);
		break;
	case METRICS_CURR_VCLK:
		*value = SMUQ10_TO_UINT(metrics->VclkFrequency[0]);
		break;
	case METRICS_CURR_DCLK:
		*value = SMUQ10_TO_UINT(metrics->DclkFrequency[0]);
		break;
	case METRICS_CURR_FCLK:
		*value = SMUQ10_TO_UINT(metrics->FclkFrequency);
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = SMUQ10_TO_UINT(metrics->SocketGfxBusy);
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
		break;
	/* This is the max of all VRs and not just SOC VR.
	 * No need to define another data type for the same.
	 */
	case METRICS_TEMPERATURE_VRSOC:
		*value = SMUQ10_TO_UINT(metrics->MaxVrTemperature);
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	MetricsMember_t member_type;

	if (!value)
		return -EINVAL;

	switch (clk_type) {
	case SMU_GFXCLK:
		member_type = METRICS_CURR_GFXCLK;
		break;
	case SMU_UCLK:
		member_type = METRICS_CURR_UCLK;
		break;
	case SMU_SOCCLK:
		member_type = METRICS_CURR_SOCCLK;
		break;
	case SMU_VCLK:
		member_type = METRICS_CURR_VCLK;
		break;
	case SMU_DCLK:
		member_type = METRICS_CURR_DCLK;
		break;
	case SMU_FCLK:
		member_type = METRICS_CURR_FCLK;
		break;
	default:
		return -EINVAL;
	}

	return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
}

static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type type, char *buf)
{
	int i, now, size = 0;
	int ret = 0;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	struct pp_clock_levels_with_latency clocks;
	struct smu_13_0_dpm_table *single_dpm_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = NULL;
	uint32_t display_levels;
	uint32_t freq_values[3] = { 0 };
	uint32_t min_clk, max_clk;

	smu_cmn_get_sysfs_buf(&buf, &size);

	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	dpm_context = smu_dpm->dpm_context;

	switch (type) {
	case SMU_OD_SCLK:
		size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
		fallthrough;
	case SMU_SCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current gfx clk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get gfx clk levels Failed!");
			return ret;
		}

		display_levels = clocks.num_levels;

		min_clk = pstate_table->gfxclk_pstate.curr.min;
		max_clk = pstate_table->gfxclk_pstate.curr.max;

		freq_values[0] = min_clk;
		freq_values[1] = max_clk;

		/* fine-grained dpm has only 2 levels */
		if (now > min_clk && now < max_clk) {
			display_levels = clocks.num_levels + 1;
			freq_values[2] = max_clk;
			freq_values[1] = now;
		}
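		/*
		 * Example: with curr.min = 500 MHz, now = 800 MHz and
		 * curr.max = 1300 MHz, three levels are shown:
		 *   0: 500Mhz  1: 800Mhz *  2: 1300Mhz
		 */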

		/*
		 * For DPM disabled case, there will be only one clock level.
		 * And it's safe to assume that is always the current clock.
		 */
		if (display_levels == clocks.num_levels) {
			for (i = 0; i < clocks.num_levels; i++)
				size += sysfs_emit_at(
					buf, size, "%d: %uMhz %s\n", i,
					freq_values[i],
					(clocks.num_levels == 1) ?
						"*" :
						(smu_v13_0_6_freqs_in_same_level(
							 freq_values[i], now) ?
							 "*" :
							 ""));
		} else {
			for (i = 0; i < display_levels; i++)
				size += sysfs_emit_at(buf, size,
						      "%d: %uMhz %s\n", i,
						      freq_values[i],
						      i == 1 ? "*" : "");
		}

		break;

	case SMU_OD_MCLK:
		size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
		fallthrough;
	case SMU_MCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current mclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get memory clk levels Failed!");
			return ret;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", i,
				clocks.data[i].clocks_in_khz / 1000,
				(clocks.num_levels == 1) ?
					"*" :
					(smu_v13_0_6_freqs_in_same_level(
						 clocks.data[i].clocks_in_khz /
							 1000,
						 now) ?
						 "*" :
						 ""));
		break;

	case SMU_SOCCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current socclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get socclk levels Failed!");
			return ret;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", i,
				clocks.data[i].clocks_in_khz / 1000,
				(clocks.num_levels == 1) ?
					"*" :
					(smu_v13_0_6_freqs_in_same_level(
						 clocks.data[i].clocks_in_khz /
							 1000,
						 now) ?
						 "*" :
						 ""));
		break;

	case SMU_FCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current fclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get fclk levels Failed!");
			return ret;
		}

		for (i = 0; i < single_dpm_table->count; i++)
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", i,
				single_dpm_table->dpm_levels[i].value,
				(clocks.num_levels == 1) ?
					"*" :
					(smu_v13_0_6_freqs_in_same_level(
						 clocks.data[i].clocks_in_khz /
							 1000,
						 now) ?
						 "*" :
						 ""));
		break;

	case SMU_VCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current vclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get vclk levels Failed!");
			return ret;
		}

		for (i = 0; i < single_dpm_table->count; i++)
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", i,
				single_dpm_table->dpm_levels[i].value,
				(clocks.num_levels == 1) ?
					"*" :
					(smu_v13_0_6_freqs_in_same_level(
						 clocks.data[i].clocks_in_khz /
							 1000,
						 now) ?
						 "*" :
						 ""));
		break;

	case SMU_DCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
								&now);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get current dclk Failed!");
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
		if (ret) {
			dev_err(smu->adev->dev,
				"Attempt to get dclk levels Failed!");
			return ret;
		}

		for (i = 0; i < single_dpm_table->count; i++)
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", i,
				single_dpm_table->dpm_levels[i].value,
				(clocks.num_levels == 1) ?
					"*" :
					(smu_v13_0_6_freqs_in_same_level(
						 clocks.data[i].clocks_in_khz /
							 1000,
						 now) ?
						 "*" :
						 ""));
		break;

	default:
		break;
	}

	return size;
}

static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
					uint32_t feature_mask, uint32_t level)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	uint32_t freq;
	int ret = 0;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
	    (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK))) {
		freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
		ret = smu_cmn_send_smc_msg_with_param(
			smu,
			(max ? SMU_MSG_SetSoftMaxGfxClk :
			       SMU_MSG_SetSoftMinGfxclk),
			freq & 0xffff, NULL);
		if (ret) {
			dev_err(smu->adev->dev,
				"Failed to set soft %s gfxclk !\n",
				max ? "max" : "min");
			return ret;
		}
	}
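	/*
	 * For the generic min/max messages below, the 32-bit argument packs
	 * the clock id in the upper 16 bits and the frequency in MHz in the
	 * lower 16 bits; e.g. (PPCLK_UCLK << 16) | 900 requests a 900 MHz
	 * UCLK limit.
	 */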

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK))) {
		freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level]
			       .value;
		ret = smu_cmn_send_smc_msg_with_param(
			smu,
			(max ? SMU_MSG_SetSoftMaxByFreq :
			       SMU_MSG_SetSoftMinByFreq),
			(PPCLK_UCLK << 16) | (freq & 0xffff), NULL);
		if (ret) {
			dev_err(smu->adev->dev,
				"Failed to set soft %s memclk !\n",
				max ? "max" : "min");
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
	    (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK))) {
		freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
		ret = smu_cmn_send_smc_msg_with_param(
			smu,
			(max ? SMU_MSG_SetSoftMaxByFreq :
			       SMU_MSG_SetSoftMinByFreq),
			(PPCLK_SOCCLK << 16) | (freq & 0xffff), NULL);
		if (ret) {
			dev_err(smu->adev->dev,
				"Failed to set soft %s socclk !\n",
				max ? "max" : "min");
			return ret;
		}
	}

	return ret;
}

static int smu_v13_0_6_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type type, uint32_t mask)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *single_dpm_table = NULL;
	uint32_t soft_min_level, soft_max_level;
	int ret = 0;

	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		if (soft_max_level >= single_dpm_table->count) {
			dev_err(smu->adev->dev,
				"Clock level specified %d is over max allowed %d\n",
				soft_max_level, single_dpm_table->count - 1);
			ret = -EINVAL;
			break;
		}

		ret = smu_v13_0_6_upload_dpm_level(
			smu, false, FEATURE_MASK(FEATURE_DPM_GFXCLK),
			soft_min_level);
		if (ret) {
			dev_err(smu->adev->dev,
				"Failed to upload boot level to lowest!\n");
			break;
		}

		ret = smu_v13_0_6_upload_dpm_level(
			smu, true, FEATURE_MASK(FEATURE_DPM_GFXCLK),
			soft_max_level);
		if (ret)
			dev_err(smu->adev->dev,
				"Failed to upload dpm max level to highest!\n");

		break;

	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
		/*
		 * Should not arrive here since smu_13_0_6 does not
		 * support mclk/socclk/fclk softmin/softmax settings
		 */
		ret = -EINVAL;
		break;

	default:
		break;
	}

	return ret;
}

static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu,
						    enum amd_pp_sensors sensor,
						    uint32_t *value)
{
	int ret = 0;

	if (!value)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_6_get_smu_metrics_data(
			smu, METRICS_AVERAGE_GFXACTIVITY, value);
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = smu_v13_0_6_get_smu_metrics_data(
			smu, METRICS_AVERAGE_MEMACTIVITY, value);
		break;
	default:
		dev_err(smu->adev->dev,
			"Invalid sensor for retrieving clock activity\n");
		return -EINVAL;
	}

	return ret;
}

static int smu_v13_0_6_get_gpu_power(struct smu_context *smu, uint32_t *value)
{
	if (!value)
		return -EINVAL;

	return smu_v13_0_6_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER,
						value);
}

static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu,
					       enum amd_pp_sensors sensor,
					       uint32_t *value)
{
	int ret = 0;

	if (!value)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v13_0_6_get_smu_metrics_data(
			smu, METRICS_TEMPERATURE_HOTSPOT, value);
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v13_0_6_get_smu_metrics_data(
			smu, METRICS_TEMPERATURE_MEM, value);
		break;
	default:
		dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
		return -EINVAL;
	}

	return ret;
}

static int smu_v13_0_6_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor, void *data,
				   uint32_t *size)
{
	int ret = 0;

	if (amdgpu_ras_intr_triggered())
		return 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MEM_LOAD:
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_6_get_current_activity_percent(smu, sensor,
							       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = smu_v13_0_6_get_gpu_power(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v13_0_6_thermal_get_temperature(smu, sensor,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(
			smu, SMU_UCLK, (uint32_t *)data);
		/* the PP interface expects the clock in 10 kHz units (MHz * 100) */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v13_0_6_get_current_clk_freq_by_table(
			smu, SMU_GFXCLK, (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
				       uint32_t *current_power_limit,
				       uint32_t *default_power_limit,
				       uint32_t *max_power_limit)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;
	uint32_t power_limit = 0;
	int ret;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);

	if (ret) {
		dev_err(smu->adev->dev, "Couldn't get PPT limit");
		return -EINVAL;
	}

	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

	if (max_power_limit)
		*max_power_limit = pptable->MaxSocketPowerLimit;

	return 0;
}

static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
				       enum smu_ppt_limit_type limit_type,
				       uint32_t limit)
{
	return smu_v13_0_set_power_limit(smu, limit_type, limit);
}

static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_13_0_power_context *power_context = smu_power->power_context;
	uint32_t client_id = entry->client_id;
	uint32_t ctxid = entry->src_data[0];
	uint32_t src_id = entry->src_id;
	uint32_t data;

	if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == IH_INTERRUPT_ID_TO_DRIVER) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
			/*
			 * ctxid is used to distinguish different events for SMCToHost
			 * interrupt.
			 */
			switch (ctxid) {
			case IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				/* This uses the new method which fixes the
				 * incorrect throttling status reporting
				 * through metrics table. For older FWs,
				 * it will be ignored.
				 */
				if (__ratelimit(&adev->throttling_logging_rs)) {
					atomic_set(
						&power_context->throttle_status,
						entry->src_data[1]);
					schedule_work(&smu->throttling_logging_work);
				}

				break;
			}
		}
	}

	return 0;
}

static int smu_v13_0_6_set_irq_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     unsigned int type,
				     enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v13_0_6_irq_funcs = {
	.set = smu_v13_0_6_set_irq_state,
	.process = smu_v13_0_6_irq_process,
};

static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v13_0_6_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				IH_INTERRUPT_ID_TO_DRIVER,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

static int smu_v13_0_6_notify_unload(struct smu_context *smu)
{
	uint32_t smu_version;

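	/*
	 * Assumption: the PMFW version is packed as 0xMMmmpp
	 * (major/minor/patch), so 0x553500 corresponds to 85.53.0; firmware
	 * at or below that version does not handle the unload notification.
	 */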
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if (smu_version <= 0x553500)
		return 0;

	dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
	/* Ignore the return value; just inform FW that the driver is going away */
	smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);

	return 0;
}

static int smu_v13_0_6_system_features_control(struct smu_context *smu,
					       bool enable)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (enable) {
		if (!(adev->flags & AMD_IS_APU))
			ret = smu_v13_0_system_features_control(smu, enable);
	} else {
		/* Notify FW that the device is no longer driver managed */
		smu_v13_0_6_notify_unload(smu);
	}

	return ret;
}

static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu,
						       uint32_t min,
						       uint32_t max)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
					      max & 0xffff, NULL);
	if (ret)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
					      min & 0xffff, NULL);

	return ret;
}

static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
					     enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	int ret;

	/* Disable determinism if switching to another mode */
	if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
	    (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
		smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
	}

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
		return 0;

	case AMD_DPM_FORCED_LEVEL_AUTO:
		if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) &&
		    (gfx_table->max == pstate_table->gfxclk_pstate.curr.max))
			return 0;

		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
			smu, gfx_table->min, gfx_table->max);
		if (ret)
			return ret;

		pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
		return 0;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
		return 0;
	default:
		break;
	}

	return -EINVAL;
}

static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
						   enum smu_clk_type clk_type,
						   uint32_t min, uint32_t max)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
		return -EINVAL;

	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
	    (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		if (min >= max) {
			dev_err(smu->adev->dev,
				"Minimum GFX clk should be less than the maximum allowed clock\n");
			return -EINVAL;
		}

		if ((min == pstate_table->gfxclk_pstate.curr.min) &&
		    (max == pstate_table->gfxclk_pstate.curr.max))
			return 0;

		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max);
		if (!ret) {
			pstate_table->gfxclk_pstate.curr.min = min;
			pstate_table->gfxclk_pstate.curr.max = max;
		}

		return ret;
	}

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
		    (max > dpm_context->dpm_tables.gfx_table.max)) {
			dev_warn(
				adev->dev,
				"Invalid max frequency %d MHz specified for determinism\n",
				max);
			return -EINVAL;
		}

		/* Restore default min/max clocks and enable determinism */
		min_clk = dpm_context->dpm_tables.gfx_table.min;
		max_clk = dpm_context->dpm_tables.gfx_table.max;
		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
								  max_clk);
		if (!ret) {
			usleep_range(500, 1000);
			ret = smu_cmn_send_smc_msg_with_param(
				smu, SMU_MSG_EnableDeterminism, max, NULL);
			if (ret) {
				dev_err(adev->dev,
					"Failed to enable determinism at GFX clock %d MHz\n",
					max);
			} else {
				pstate_table->gfxclk_pstate.curr.min = min_clk;
				pstate_table->gfxclk_pstate.curr.max = max;
			}
		}
	}

	return ret;
}

static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
					  enum PP_OD_DPM_TABLE_COMMAND type,
					  long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	/* Only allowed in manual or determinism mode */
	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
	    (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev,
				"Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
				dev_warn(
					smu->adev->dev,
					"Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
					input[1],
					dpm_context->dpm_tables.gfx_table.min);
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.min = input[1];
		} else if (input[0] == 1) {
			if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
				dev_warn(
					smu->adev->dev,
					"Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
					input[1],
					dpm_context->dpm_tables.gfx_table.max);
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.max = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev,
				"Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* Use the default frequencies for manual and determinism mode */
			min_clk = dpm_context->dpm_tables.gfx_table.min;
			max_clk = dpm_context->dpm_tables.gfx_table.max;

			return smu_v13_0_6_set_soft_freq_limited_range(
				smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev,
				"Input parameter number not correct\n");
			return -EINVAL;
		} else {
			if (!pstate_table->gfxclk_pstate.custom.min)
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;

			if (!pstate_table->gfxclk_pstate.custom.max)
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;

			min_clk = pstate_table->gfxclk_pstate.custom.min;
			max_clk = pstate_table->gfxclk_pstate.custom.max;

			return smu_v13_0_6_set_soft_freq_limited_range(
				smu, SMU_GFXCLK, min_clk, max_clk);
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
					uint64_t *feature_mask)
{
	uint32_t smu_version;
	int ret;

	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	ret = smu_cmn_get_enabled_mask(smu, feature_mask);

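	/*
	 * Older PMFW (below 0x552F00, i.e. 85.47.0 under the assumed
	 * 0xMMmmpp packing) does not implement the enabled-mask query and
	 * answers with a failure; report an empty feature mask instead of
	 * erroring out.
	 */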
	if (ret == -EIO && smu_version < 0x552F00) {
		*feature_mask = 0;
		ret = 0;
	}

	return ret;
}

static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
{
	int ret;
	uint64_t feature_enabled;

	ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);

	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
					void *table_data)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t table_size;
	int ret = 0;

	if (!table_data)
		return -EINVAL;

	table_size = smu_table->tables[SMU_TABLE_I2C_COMMANDS].size;

	memcpy(table->cpu_addr, table_data, table_size);
	/* Flush hdp cache */
	amdgpu_asic_flush_hdp(adev, NULL);
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
				   NULL);

	return ret;
}

static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
				struct i2c_msg *msg, int num_msgs)
{
	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = smu_i2c->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
	int i, j, r, c;
	u16 dir;

	if (!adev->pm.dpm_enabled)
		return -EBUSY;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->I2CcontrollerPort = smu_i2c->port;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
	dir = msg[0].flags & I2C_M_RD;

	for (c = i = 0; i < num_msgs; i++) {
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];

			if (!(msg[i].flags & I2C_M_RD)) {
				/* write */
				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
				cmd->ReadWriteData = msg[i].buf[j];
			}

			if ((dir ^ msg[i].flags) & I2C_M_RD) {
				/* The direction changes. */
				dir = msg[i].flags & I2C_M_RD;
				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
			}

			req->NumCmds++;

			/*
			 * Insert STOP if we are at the last byte of either last
			 * message for the transaction or the client explicitly
			 * requires a STOP at this particular message.
			 */
			if ((j == msg[i].len - 1) &&
			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
			}
		}
	}
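	/*
	 * Example: a typical EEPROM read -- a one-byte address write followed
	 * by an I2C_M_RD message -- becomes a single SwI2cRequest in which
	 * the first read byte carries CMDCONFIG_RESTART_MASK (direction
	 * change) and the final byte carries CMDCONFIG_STOP_MASK.
	 */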
1764 mutex_lock(&adev->pm.mutex);
1765 r = smu_v13_0_6_request_i2c_xfer(smu, req);
1766 mutex_unlock(&adev->pm.mutex);
1767 if (r)
1768 goto fail;
1769
1770 for (c = i = 0; i < num_msgs; i++) {
1771 if (!(msg[i].flags & I2C_M_RD)) {
1772 c += msg[i].len;
1773 continue;
1774 }
1775 for (j = 0; j < msg[i].len; j++, c++) {
1776 SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
1777
1778 msg[i].buf[j] = cmd->ReadWriteData;
1779 }
1780 }
1781 r = num_msgs;
1782fail:
1783 kfree(req);
1784 return r;
1785}
1786
1787static u32 smu_v13_0_6_i2c_func(struct i2c_adapter *adap)
1788{
1789 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1790}
1791
1792static const struct i2c_algorithm smu_v13_0_6_i2c_algo = {
1793 .master_xfer = smu_v13_0_6_i2c_xfer,
1794 .functionality = smu_v13_0_6_i2c_func,
1795};
1796
1797static const struct i2c_adapter_quirks smu_v13_0_6_i2c_control_quirks = {
1798 .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
1799 .max_read_len = MAX_SW_I2C_COMMANDS,
1800 .max_write_len = MAX_SW_I2C_COMMANDS,
1801 .max_comb_1st_msg_len = 2,
1802 .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
1803};

static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int res, i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		smu_i2c->adev = adev;
		smu_i2c->port = i;
		mutex_init(&smu_i2c->mutex);
		control->owner = THIS_MODULE;
		control->class = I2C_CLASS_SPD;
		control->dev.parent = &adev->pdev->dev;
		control->algo = &smu_v13_0_6_i2c_algo;
		snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
		control->quirks = &smu_v13_0_6_i2c_control_quirks;
		i2c_set_adapdata(control, smu_i2c);

		res = i2c_add_adapter(control);
		if (res) {
			DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
			goto Out_err;
		}
	}

	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;

	return 0;
Out_err:
	for ( ; i >= 0; i--) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	return res;
}
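
/*
 * The buses registered above behave like any other i2c_adapter (bus 0
 * doubles as the RAS/FRU EEPROM channel), so clients can be attached
 * with the usual core helpers, e.g. (sketch, not used by this driver):
 *
 *	struct i2c_board_info info = { I2C_BOARD_INFO("eeprom", 0x50) };
 *
 *	i2c_new_client_device(&adev->pm.smu_i2c[0].adapter, &info);
 */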

static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	adev->pm.ras_eeprom_i2c_bus = NULL;
	adev->pm.fru_eeprom_i2c_bus = NULL;
}

static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct PPTable_t *pptable =
		(struct PPTable_t *)smu_table->driver_pptable;

	adev->unique_id = pptable->PublicSerialNumber_AID;
	if (adev->serial[0] == '\0')
		sprintf(adev->serial, "%016llx", adev->unique_id);
}

static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
{
	/* smu_13_0_6 does not support baco */
	return false;
}

static int smu_v13_0_6_set_df_cstate(struct smu_context *smu,
				     enum pp_df_cstate state)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl,
					       state, NULL);
}

static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
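	/* Note the inverted PMFW parameter: 0 allows XGMI power down, 1 blocks it */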
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl,
					       en ? 0 : 1, NULL);
}

static const char *const throttling_logging_label[] = {
	[THROTTLER_PROCHOT_BIT] = "Prochot",
	[THROTTLER_PPT_BIT] = "PPT",
	[THROTTLER_THERMAL_SOCKET_BIT] = "SOC",
	[THROTTLER_THERMAL_VR_BIT] = "VR",
	[THROTTLER_THERMAL_HBM_BIT] = "HBM"
};

static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
{
	int throttler_idx, throttling_events = 0, buf_idx = 0;
	struct amdgpu_device *adev = smu->adev;
	uint32_t throttler_status;
	char log_buf[256];

	throttler_status = smu_v13_0_6_get_throttler_status(smu);
	if (!throttler_status)
		return;

	memset(log_buf, 0, sizeof(log_buf));
	for (throttler_idx = 0;
	     throttler_idx < ARRAY_SIZE(throttling_logging_label);
	     throttler_idx++) {
		if (throttler_status & (1U << throttler_idx)) {
			throttling_events++;
			buf_idx += snprintf(
				log_buf + buf_idx, sizeof(log_buf) - buf_idx,
				"%s%s", throttling_events > 1 ? " and " : "",
				throttling_logging_label[throttler_idx]);
			if (buf_idx >= sizeof(log_buf)) {
				dev_err(adev->dev, "buffer overflow!\n");
				log_buf[sizeof(log_buf) - 1] = '\0';
				break;
			}
		}
	}

	dev_warn(adev->dev,
		 "WARN: GPU is throttled, expect performance decrease. %s.\n",
		 log_buf);
	kgd2kfd_smi_event_throttle(
		smu->adev->kfd.dev,
		smu_cmn_get_indep_throttler_status(throttler_status,
						   smu_v13_0_6_throttler_map));
}
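
/*
 * Example of the resulting warning when, say, PROCHOT and HBM thermal
 * throttling are active at the same time (illustrative output):
 *
 *	amdgpu 0000:03:00.0: WARN: GPU is throttled, expect performance
 *	decrease. Prochot and HBM.
 */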

static int
smu_v13_0_6_get_current_pcie_link_width_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return REG_GET_FIELD(RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL),
			     PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
}
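
/*
 * LC_LINK_WIDTH_RD is an encoded field rather than a lane count; other
 * amdgpu SMU code decodes values 0-6 as x0/x1/x2/x4/x8/x12/x16, and the
 * same encoding is assumed to apply here.
 */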

static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t esm_ctrl;

	/* TODO: confirm this on real target */
	esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
	if ((esm_ctrl >> 15) & 0x1FFFF)
		return (((esm_ctrl >> 8) & 0x3F) + 128);

	return smu_v13_0_get_current_pcie_link_speed(smu);
}
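
/*
 * Decode sketch for the ESM path above, assuming the register layout
 * implied by the masks: bits [31:15] act as ESM (Extended Speed Mode)
 * enable/status bits, and when any is set the current rate is taken
 * from bits [13:8] offset by 128 instead of the generic PCIe helper.
 */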

static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, inst0, xcc0;
	MetricsTable_t *metrics;

	inst0 = adev->sdma.instance[0].aid_id;
	xcc0 = GET_INST(GC, 0);

	metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
	if (!metrics)
		return -ENOMEM;

	ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
	if (ret) {
		kfree(metrics);
		return ret;
	}

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

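	/*
	 * PMFW metrics below are Q10 fixed point (10 fractional bits);
	 * SMUQ10_TO_UINT() keeps the integer part only, e.g. a raw
	 * reading of 25600 (25 << 10) converts to 25.
	 */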
	gpu_metrics->temperature_hotspot =
		SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
	/* Individual HBM stack temperature is not reported */
	gpu_metrics->temperature_mem =
		SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
	/* Reports max temperature of all voltage rails */
	gpu_metrics->temperature_vrsoc =
		SMUQ10_TO_UINT(metrics->MaxVrTemperature);

	gpu_metrics->average_gfx_activity =
		SMUQ10_TO_UINT(metrics->SocketGfxBusy);
	gpu_metrics->average_umc_activity =
		SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);

	gpu_metrics->average_socket_power =
		SMUQ10_TO_UINT(metrics->SocketPower);
	/* Energy is reported in 15.625mJ units */
	gpu_metrics->energy_accumulator =
		SMUQ10_TO_UINT(metrics->SocketEnergyAcc);

	gpu_metrics->current_gfxclk =
		SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]);
	gpu_metrics->current_socclk =
		SMUQ10_TO_UINT(metrics->SocclkFrequency[inst0]);
	gpu_metrics->current_uclk = SMUQ10_TO_UINT(metrics->UclkFrequency);
	gpu_metrics->current_vclk0 =
		SMUQ10_TO_UINT(metrics->VclkFrequency[inst0]);
	gpu_metrics->current_dclk0 =
		SMUQ10_TO_UINT(metrics->DclkFrequency[inst0]);

	gpu_metrics->average_gfxclk_frequency = gpu_metrics->current_gfxclk;
	gpu_metrics->average_socclk_frequency = gpu_metrics->current_socclk;
	gpu_metrics->average_uclk_frequency = gpu_metrics->current_uclk;
	gpu_metrics->average_vclk0_frequency = gpu_metrics->current_vclk0;
	gpu_metrics->average_dclk0_frequency = gpu_metrics->current_dclk0;

	/* Throttle status is not reported through metrics now */
	gpu_metrics->throttle_status = 0;

	if (!(adev->flags & AMD_IS_APU)) {
		gpu_metrics->pcie_link_width =
			smu_v13_0_6_get_current_pcie_link_width_level(smu);
		gpu_metrics->pcie_link_speed =
			smu_v13_0_6_get_current_pcie_link_speed(smu);
	}

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->gfx_activity_acc =
		SMUQ10_TO_UINT(metrics->SocketGfxBusyAcc);
	gpu_metrics->mem_activity_acc =
		SMUQ10_TO_UINT(metrics->DramBandwidthUtilizationAcc);

	gpu_metrics->firmware_timestamp = metrics->Timestamp;

	*table = (void *)gpu_metrics;
	kfree(metrics);

	return sizeof(struct gpu_metrics_v1_3);
}

static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
	int ret = 0, index;
	struct amdgpu_device *adev = smu->adev;
	int timeout = 10;

	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GfxDeviceDriverReset);

	mutex_lock(&smu->message_lock);

	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
					       SMU_RESET_MODE_2);

	/* This is similar to FLR, wait till max FLR timeout */
	msleep(100);

	dev_dbg(smu->adev->dev, "restore config space...\n");
	/* Restore the config space saved during init */
	amdgpu_device_load_pci_state(adev->pdev);

	dev_dbg(smu->adev->dev, "wait for reset ack\n");
	do {
		ret = smu_cmn_wait_for_response(smu);
		/* Wait a bit more time for getting ACK */
		if (ret == -ETIME) {
			--timeout;
			usleep_range(500, 1000);
			continue;
		}

		if (ret) {
			dev_err(adev->dev,
				"failed to send mode2 reset message, param: 0x%08x, error code %d\n",
				SMU_RESET_MODE_2, ret);
			goto out;
		}
	} while (ret == -ETIME && timeout);

out:
	mutex_unlock(&smu->message_lock);

	return ret;
}

static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras;
	u32 fatal_err, param;
	int ret = 0;

	ras = amdgpu_ras_get_context(adev);
	fatal_err = 0;
	param = SMU_RESET_MODE_1;

	/* fatal error triggered by ras, PMFW supports the flag */
	if (ras && atomic_read(&ras->in_recovery))
		fatal_err = 1;

	param |= (fatal_err << 16);
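	/*
	 * The reset mode occupies the low 16 bits and the fatal-error flag
	 * bit 16; assuming SMU_RESET_MODE_1 == 1, a RAS-triggered mode-1
	 * reset would send param = 0x00010001.
	 */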
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
					      param, NULL);

	if (!ret)
		msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);

	return ret;
}

static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
	/* TODO: Enable this when FW support is added */
	return false;
}

static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
{
	return true;
}

static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
						 uint32_t size)
{
	int ret = 0;

	/* message SMU to update the bad page number on SMUBUS */
	ret = smu_cmn_send_smc_msg_with_param(
		smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
	if (ret)
		dev_err(smu->adev->dev,
			"[%s] failed to message SMU to update HBM bad pages number\n",
			__func__);

	return ret;
}

static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
	/* init dpm */
	.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
	/* dpm/clk tables */
	.set_default_dpm_table = smu_v13_0_6_set_default_dpm_table,
	.populate_umd_state_clk = smu_v13_0_6_populate_umd_state_clk,
	.print_clk_levels = smu_v13_0_6_print_clk_levels,
	.force_clk_levels = smu_v13_0_6_force_clk_levels,
	.read_sensor = smu_v13_0_6_read_sensor,
	.set_performance_level = smu_v13_0_6_set_performance_level,
	.get_power_limit = smu_v13_0_6_get_power_limit,
	.is_dpm_running = smu_v13_0_6_is_dpm_running,
	.get_unique_id = smu_v13_0_6_get_unique_id,
	.init_smc_tables = smu_v13_0_6_init_smc_tables,
	.fini_smc_tables = smu_v13_0_fini_smc_tables,
	.init_power = smu_v13_0_init_power,
	.fini_power = smu_v13_0_fini_power,
	.check_fw_status = smu_v13_0_6_check_fw_status,
	/* pptable related */
	.check_fw_version = smu_v13_0_check_fw_version,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.set_tool_table_location = smu_v13_0_set_tool_table_location,
	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
	.system_features_control = smu_v13_0_6_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.get_enabled_mask = smu_v13_0_6_get_enabled_mask,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.set_power_limit = smu_v13_0_6_set_power_limit,
	.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
	.register_irq_handler = smu_v13_0_6_register_irq_handler,
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
	.setup_pptable = smu_v13_0_6_setup_pptable,
	.baco_is_support = smu_v13_0_6_is_baco_supported,
	.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
	.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
	.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
	.set_df_cstate = smu_v13_0_6_set_df_cstate,
	.allow_xgmi_power_down = smu_v13_0_6_allow_xgmi_power_down,
	.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
	.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
	.mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
	.mode1_reset = smu_v13_0_6_mode1_reset,
	.mode2_reset = smu_v13_0_6_mode2_reset,
	.wait_for_event = smu_v13_0_wait_for_event,
	.i2c_init = smu_v13_0_6_i2c_control_init,
	.i2c_fini = smu_v13_0_6_i2c_control_fini,
	.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
};

void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
	smu->message_map = smu_v13_0_6_message_map;
	smu->clock_map = smu_v13_0_6_clk_map;
	smu->feature_map = smu_v13_0_6_feature_mask_map;
	smu->table_map = smu_v13_0_6_table_map;
	smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
	smu_v13_0_set_smu_mailbox_registers(smu);
}