/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_vangogh.h"
#include "vangogh_ppt.h"
#include "smu_v11_5_ppsmc.h"
#include "smu_v11_5_pmfw.h"
#include "smu_cmn.h"
#include "soc15_common.h"
#include "asic_reg/gc/gc_10_3_0_offset.h"
#include "asic_reg/gc/gc_10_3_0_sh_mask.h"
#include <asm/processor.h>

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
        FEATURE_MASK(FEATURE_CCLK_DPM_BIT)       | \
        FEATURE_MASK(FEATURE_VCN_DPM_BIT)        | \
        FEATURE_MASK(FEATURE_FCLK_DPM_BIT)       | \
        FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT)     | \
        FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)     | \
        FEATURE_MASK(FEATURE_LCLK_DPM_BIT)       | \
        FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT)    | \
        FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)     | \
        FEATURE_MASK(FEATURE_GFX_DPM_BIT))

static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                        PPSMC_MSG_TestMessage,                          0),
        MSG_MAP(GetSmuVersion,                      PPSMC_MSG_GetSmuVersion,                        0),
        MSG_MAP(GetDriverIfVersion,                 PPSMC_MSG_GetDriverIfVersion,                   0),
        MSG_MAP(EnableGfxOff,                       PPSMC_MSG_EnableGfxOff,                         0),
        MSG_MAP(AllowGfxOff,                        PPSMC_MSG_AllowGfxOff,                          0),
        MSG_MAP(DisallowGfxOff,                     PPSMC_MSG_DisallowGfxOff,                       0),
        MSG_MAP(PowerDownIspByTile,                 PPSMC_MSG_PowerDownIspByTile,                   0),
        MSG_MAP(PowerUpIspByTile,                   PPSMC_MSG_PowerUpIspByTile,                     0),
        MSG_MAP(PowerDownVcn,                       PPSMC_MSG_PowerDownVcn,                         0),
        MSG_MAP(PowerUpVcn,                         PPSMC_MSG_PowerUpVcn,                           0),
        MSG_MAP(RlcPowerNotify,                     PPSMC_MSG_RlcPowerNotify,                       0),
        MSG_MAP(SetHardMinVcn,                      PPSMC_MSG_SetHardMinVcn,                        0),
        MSG_MAP(SetSoftMinGfxclk,                   PPSMC_MSG_SetSoftMinGfxclk,                     0),
        MSG_MAP(ActiveProcessNotify,                PPSMC_MSG_ActiveProcessNotify,                  0),
        MSG_MAP(SetHardMinIspiclkByFreq,            PPSMC_MSG_SetHardMinIspiclkByFreq,              0),
        MSG_MAP(SetHardMinIspxclkByFreq,            PPSMC_MSG_SetHardMinIspxclkByFreq,              0),
        MSG_MAP(SetDriverDramAddrHigh,              PPSMC_MSG_SetDriverDramAddrHigh,                0),
        MSG_MAP(SetDriverDramAddrLow,               PPSMC_MSG_SetDriverDramAddrLow,                 0),
        MSG_MAP(TransferTableSmu2Dram,              PPSMC_MSG_TransferTableSmu2Dram,                0),
        MSG_MAP(TransferTableDram2Smu,              PPSMC_MSG_TransferTableDram2Smu,                0),
        MSG_MAP(GfxDeviceDriverReset,               PPSMC_MSG_GfxDeviceDriverReset,                 0),
        MSG_MAP(GetEnabledSmuFeatures,              PPSMC_MSG_GetEnabledSmuFeatures,                0),
        MSG_MAP(SetHardMinSocclkByFreq,             PPSMC_MSG_SetHardMinSocclkByFreq,               0),
        MSG_MAP(SetSoftMinFclk,                     PPSMC_MSG_SetSoftMinFclk,                       0),
        MSG_MAP(SetSoftMinVcn,                      PPSMC_MSG_SetSoftMinVcn,                        0),
        MSG_MAP(EnablePostCode,                     PPSMC_MSG_EnablePostCode,                       0),
        MSG_MAP(GetGfxclkFrequency,                 PPSMC_MSG_GetGfxclkFrequency,                   0),
        MSG_MAP(GetFclkFrequency,                   PPSMC_MSG_GetFclkFrequency,                     0),
        MSG_MAP(SetSoftMaxGfxClk,                   PPSMC_MSG_SetSoftMaxGfxClk,                     0),
        MSG_MAP(SetHardMinGfxClk,                   PPSMC_MSG_SetHardMinGfxClk,                     0),
        MSG_MAP(SetSoftMaxSocclkByFreq,             PPSMC_MSG_SetSoftMaxSocclkByFreq,               0),
        MSG_MAP(SetSoftMaxFclkByFreq,               PPSMC_MSG_SetSoftMaxFclkByFreq,                 0),
        MSG_MAP(SetSoftMaxVcn,                      PPSMC_MSG_SetSoftMaxVcn,                        0),
        MSG_MAP(SetPowerLimitPercentage,            PPSMC_MSG_SetPowerLimitPercentage,              0),
        MSG_MAP(PowerDownJpeg,                      PPSMC_MSG_PowerDownJpeg,                        0),
        MSG_MAP(PowerUpJpeg,                        PPSMC_MSG_PowerUpJpeg,                          0),
        MSG_MAP(SetHardMinFclkByFreq,               PPSMC_MSG_SetHardMinFclkByFreq,                 0),
        MSG_MAP(SetSoftMinSocclkByFreq,             PPSMC_MSG_SetSoftMinSocclkByFreq,               0),
        MSG_MAP(PowerUpCvip,                        PPSMC_MSG_PowerUpCvip,                          0),
        MSG_MAP(PowerDownCvip,                      PPSMC_MSG_PowerDownCvip,                        0),
        MSG_MAP(GetPptLimit,                        PPSMC_MSG_GetPptLimit,                          0),
        MSG_MAP(GetThermalLimit,                    PPSMC_MSG_GetThermalLimit,                      0),
        MSG_MAP(GetCurrentTemperature,              PPSMC_MSG_GetCurrentTemperature,                0),
        MSG_MAP(GetCurrentPower,                    PPSMC_MSG_GetCurrentPower,                      0),
        MSG_MAP(GetCurrentVoltage,                  PPSMC_MSG_GetCurrentVoltage,                    0),
        MSG_MAP(GetCurrentCurrent,                  PPSMC_MSG_GetCurrentCurrent,                    0),
        MSG_MAP(GetAverageCpuActivity,              PPSMC_MSG_GetAverageCpuActivity,                0),
        MSG_MAP(GetAverageGfxActivity,              PPSMC_MSG_GetAverageGfxActivity,                0),
        MSG_MAP(GetAveragePower,                    PPSMC_MSG_GetAveragePower,                      0),
        MSG_MAP(GetAverageTemperature,              PPSMC_MSG_GetAverageTemperature,                0),
        MSG_MAP(SetAveragePowerTimeConstant,        PPSMC_MSG_SetAveragePowerTimeConstant,          0),
        MSG_MAP(SetAverageActivityTimeConstant,     PPSMC_MSG_SetAverageActivityTimeConstant,       0),
        MSG_MAP(SetAverageTemperatureTimeConstant,  PPSMC_MSG_SetAverageTemperatureTimeConstant,    0),
        MSG_MAP(SetMitigationEndHysteresis,         PPSMC_MSG_SetMitigationEndHysteresis,           0),
        MSG_MAP(GetCurrentFreq,                     PPSMC_MSG_GetCurrentFreq,                       0),
        MSG_MAP(SetReducedPptLimit,                 PPSMC_MSG_SetReducedPptLimit,                   0),
        MSG_MAP(SetReducedThermalLimit,             PPSMC_MSG_SetReducedThermalLimit,               0),
        MSG_MAP(DramLogSetDramAddr,                 PPSMC_MSG_DramLogSetDramAddr,                   0),
        MSG_MAP(StartDramLogging,                   PPSMC_MSG_StartDramLogging,                     0),
        MSG_MAP(StopDramLogging,                    PPSMC_MSG_StopDramLogging,                      0),
        MSG_MAP(SetSoftMinCclk,                     PPSMC_MSG_SetSoftMinCclk,                       0),
        MSG_MAP(SetSoftMaxCclk,                     PPSMC_MSG_SetSoftMaxCclk,                       0),
        MSG_MAP(RequestActiveWgp,                   PPSMC_MSG_RequestActiveWgp,                     0),
        MSG_MAP(SetFastPPTLimit,                    PPSMC_MSG_SetFastPPTLimit,                      0),
        MSG_MAP(SetSlowPPTLimit,                    PPSMC_MSG_SetSlowPPTLimit,                      0),
        MSG_MAP(GetFastPPTLimit,                    PPSMC_MSG_GetFastPPTLimit,                      0),
        MSG_MAP(GetSlowPPTLimit,                    PPSMC_MSG_GetSlowPPTLimit,                      0),
};

static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
        FEA_MAP(PPT),
        FEA_MAP(TDC),
        FEA_MAP(THERMAL),
        FEA_MAP(DS_GFXCLK),
        FEA_MAP(DS_SOCCLK),
        FEA_MAP(DS_LCLK),
        FEA_MAP(DS_FCLK),
        FEA_MAP(DS_MP1CLK),
        FEA_MAP(DS_MP0CLK),
        FEA_MAP(ATHUB_PG),
        FEA_MAP(CCLK_DPM),
        FEA_MAP(FAN_CONTROLLER),
        FEA_MAP(ULV),
        FEA_MAP(VCN_DPM),
        FEA_MAP(LCLK_DPM),
        FEA_MAP(SHUBCLK_DPM),
        FEA_MAP(DCFCLK_DPM),
        FEA_MAP(DS_DCFCLK),
        FEA_MAP(S0I2),
        FEA_MAP(SMU_LOW_POWER),
        FEA_MAP(GFX_DEM),
        FEA_MAP(PSI),
        FEA_MAP(PROCHOT),
        FEA_MAP(CPUOFF),
        FEA_MAP(STAPM),
        FEA_MAP(S0I3),
        FEA_MAP(DF_CSTATES),
        FEA_MAP(PERF_LIMIT),
        FEA_MAP(CORE_DLDO),
        FEA_MAP(RSMU_LOW_POWER),
        FEA_MAP(SMN_LOW_POWER),
        FEA_MAP(THM_LOW_POWER),
        FEA_MAP(SMUIO_LOW_POWER),
        FEA_MAP(MP1_LOW_POWER),
        FEA_MAP(DS_VCN),
        FEA_MAP(CPPC),
        FEA_MAP(OS_CSTATES),
        FEA_MAP(ISP_DPM),
        FEA_MAP(A55_DPM),
        FEA_MAP(CVIP_DSP_DPM),
        FEA_MAP(MSMU_LOW_POWER),
        FEA_MAP_REVERSE(SOCCLK),
        FEA_MAP_REVERSE(FCLK),
        FEA_MAP_HALF_REVERSE(GFX),
};

static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP_VALID(WATERMARKS),
        TAB_MAP_VALID(SMU_METRICS),
        TAB_MAP_VALID(CUSTOM_DPM),
        TAB_MAP_VALID(DPMCLOCKS),
};

static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,         WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,                WORKLOAD_PPLIB_VIDEO_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,                   WORKLOAD_PPLIB_VR_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,              WORKLOAD_PPLIB_COMPUTE_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,               WORKLOAD_PPLIB_CUSTOM_BIT),
};

static const uint8_t vangogh_throttler_map[] = {
        [THROTTLER_STATUS_BIT_SPL]      = (SMU_THROTTLER_SPL_BIT),
        [THROTTLER_STATUS_BIT_FPPT]     = (SMU_THROTTLER_FPPT_BIT),
        [THROTTLER_STATUS_BIT_SPPT]     = (SMU_THROTTLER_SPPT_BIT),
        [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT),
        [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT),
        [THROTTLER_STATUS_BIT_THM_GFX]  = (SMU_THROTTLER_TEMP_GPU_BIT),
        [THROTTLER_STATUS_BIT_THM_SOC]  = (SMU_THROTTLER_TEMP_SOC_BIT),
        [THROTTLER_STATUS_BIT_TDC_VDD]  = (SMU_THROTTLER_TDC_VDD_BIT),
        [THROTTLER_STATUS_BIT_TDC_SOC]  = (SMU_THROTTLER_TDC_SOC_BIT),
        [THROTTLER_STATUS_BIT_TDC_GFX]  = (SMU_THROTTLER_TDC_GFX_BIT),
        [THROTTLER_STATUS_BIT_TDC_CVIP] = (SMU_THROTTLER_TDC_CVIP_BIT),
};

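/*
 * Allocate the driver-side SMU tables.  The metrics table layout depends on
 * the PMFW interface version: versions below 0x3 use the legacy
 * SmuMetrics_legacy_t layout, newer firmware uses SmuMetrics_t.
 */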
static int vangogh_tables_init(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        struct amdgpu_device *adev = smu->adev;
        uint32_t if_version;
        int ret = 0;

        ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
        if (ret) {
                dev_err(adev->dev, "Failed to get smu if version!\n");
                goto err0_out;
        }

        SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

        if (if_version < 0x3) {
                SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
                                PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
                smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
        } else {
                SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
                                PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
                smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
        }
        if (!smu_table->metrics_table)
                goto err0_out;
        smu_table->metrics_time = 0;

        smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
        smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
        if (!smu_table->gpu_metrics_table)
                goto err1_out;

        smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
        if (!smu_table->watermarks_table)
                goto err2_out;

        smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
        if (!smu_table->clocks_table)
                goto err3_out;

        return 0;

err3_out:
        kfree(smu_table->watermarks_table);
err2_out:
        kfree(smu_table->gpu_metrics_table);
err1_out:
        kfree(smu_table->metrics_table);
err0_out:
        return -ENOMEM;
}

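/* Fetch a single metrics value from the cached legacy-layout metrics table. */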
static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
                                       MetricsMember_t member,
                                       uint32_t *value)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
        int ret = 0;

        ret = smu_cmn_get_metrics_table(smu,
                                        NULL,
                                        false);
        if (ret)
                return ret;

        switch (member) {
        case METRICS_CURR_GFXCLK:
                *value = metrics->GfxclkFrequency;
                break;
        case METRICS_AVERAGE_SOCCLK:
                *value = metrics->SocclkFrequency;
                break;
        case METRICS_AVERAGE_VCLK:
                *value = metrics->VclkFrequency;
                break;
        case METRICS_AVERAGE_DCLK:
                *value = metrics->DclkFrequency;
                break;
        case METRICS_CURR_UCLK:
                *value = metrics->MemclkFrequency;
                break;
        case METRICS_AVERAGE_GFXACTIVITY:
                *value = metrics->GfxActivity / 100;
                break;
        case METRICS_AVERAGE_VCNACTIVITY:
                *value = metrics->UvdActivity;
                break;
        case METRICS_AVERAGE_SOCKETPOWER:
                *value = (metrics->CurrentSocketPower << 8) / 1000;
                break;
        case METRICS_TEMPERATURE_EDGE:
                *value = metrics->GfxTemperature / 100 *
                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
        case METRICS_TEMPERATURE_HOTSPOT:
                *value = metrics->SocTemperature / 100 *
                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
        case METRICS_THROTTLER_STATUS:
                *value = metrics->ThrottlerStatus;
                break;
        case METRICS_VOLTAGE_VDDGFX:
                *value = metrics->Voltage[2];
                break;
        case METRICS_VOLTAGE_VDDSOC:
                *value = metrics->Voltage[1];
                break;
        case METRICS_AVERAGE_CPUCLK:
                memcpy(value, &metrics->CoreFrequency[0],
                       smu->cpu_core_num * sizeof(uint16_t));
                break;
        default:
                *value = UINT_MAX;
                break;
        }

        return ret;
}

static int vangogh_get_smu_metrics_data(struct smu_context *smu,
                                       MetricsMember_t member,
                                       uint32_t *value)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;

        ret = smu_cmn_get_metrics_table(smu,
                                        NULL,
                                        false);
        if (ret)
                return ret;

        switch (member) {
        case METRICS_CURR_GFXCLK:
                *value = metrics->Current.GfxclkFrequency;
                break;
        case METRICS_AVERAGE_SOCCLK:
                *value = metrics->Current.SocclkFrequency;
                break;
        case METRICS_AVERAGE_VCLK:
                *value = metrics->Current.VclkFrequency;
                break;
        case METRICS_AVERAGE_DCLK:
                *value = metrics->Current.DclkFrequency;
                break;
        case METRICS_CURR_UCLK:
                *value = metrics->Current.MemclkFrequency;
                break;
        case METRICS_AVERAGE_GFXACTIVITY:
                *value = metrics->Current.GfxActivity;
                break;
        case METRICS_AVERAGE_VCNACTIVITY:
                *value = metrics->Current.UvdActivity;
                break;
        case METRICS_AVERAGE_SOCKETPOWER:
                *value = (metrics->Current.CurrentSocketPower << 8) / 1000;
                break;
        case METRICS_TEMPERATURE_EDGE:
                *value = metrics->Current.GfxTemperature / 100 *
                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
        case METRICS_TEMPERATURE_HOTSPOT:
                *value = metrics->Current.SocTemperature / 100 *
                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
        case METRICS_THROTTLER_STATUS:
                *value = metrics->Current.ThrottlerStatus;
                break;
        case METRICS_VOLTAGE_VDDGFX:
                *value = metrics->Current.Voltage[2];
                break;
        case METRICS_VOLTAGE_VDDSOC:
                *value = metrics->Current.Voltage[1];
                break;
        case METRICS_AVERAGE_CPUCLK:
                memcpy(value, &metrics->Current.CoreFrequency[0],
                       smu->cpu_core_num * sizeof(uint16_t));
                break;
        default:
                *value = UINT_MAX;
                break;
        }

        return ret;
}

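/*
 * Metrics accessor used by the rest of the driver: picks the legacy or the
 * current reader based on the SMU interface version.
 */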
static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
                                       MetricsMember_t member,
                                       uint32_t *value)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t if_version;
        int ret = 0;

        ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
        if (ret) {
                dev_err(adev->dev, "Failed to get smu if version!\n");
                return ret;
        }

        if (if_version < 0x3)
                ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
        else
                ret = vangogh_get_smu_metrics_data(smu, member, value);

        return ret;
}

static int vangogh_allocate_dpm_context(struct smu_context *smu)
{
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

        smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
                                       GFP_KERNEL);
        if (!smu_dpm->dpm_context)
                return -ENOMEM;

        smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);

        return 0;
}

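/*
 * Init the SMC tables and cache the CPU core count used for per-core CCLK
 * reporting; default to 4 cores when not built for x86.
 */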
static int vangogh_init_smc_tables(struct smu_context *smu)
{
        int ret = 0;

        ret = vangogh_tables_init(smu);
        if (ret)
                return ret;

        ret = vangogh_allocate_dpm_context(smu);
        if (ret)
                return ret;

#ifdef CONFIG_X86
        /* AMD x86 APU only */
        smu->cpu_core_num = boot_cpu_data.x86_max_cores;
#else
        smu->cpu_core_num = 4;
#endif

        return smu_v11_0_init_smc_tables(smu);
}

static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
        int ret = 0;

        if (enable) {
                /* vcn dpm on is a prerequisite for vcn power gate messages */
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
                if (ret)
                        return ret;
        } else {
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
                if (ret)
                        return ret;
        }

        return ret;
}

static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
        int ret = 0;

        if (enable) {
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
                if (ret)
                        return ret;
        } else {
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
                if (ret)
                        return ret;
        }

        return ret;
}

static bool vangogh_is_dpm_running(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
        uint64_t feature_enabled;

        /* we need to re-init after suspend so return false */
        if (adev->in_suspend)
                return false;

        ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
        if (ret)
                return false;

        return !!(feature_enabled & SMC_DPM_FEATURE);
}

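/* Look up the frequency of a given DPM level from the cached DpmClocks table. */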
static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
                                                uint32_t dpm_level, uint32_t *freq)
{
        DpmClocks_t *clk_table = smu->smu_table.clocks_table;

        if (!clk_table || clk_type >= SMU_CLK_COUNT)
                return -EINVAL;

        switch (clk_type) {
        case SMU_SOCCLK:
                if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
                        return -EINVAL;
                *freq = clk_table->SocClocks[dpm_level];
                break;
        case SMU_VCLK:
                if (dpm_level >= clk_table->VcnClkLevelsEnabled)
                        return -EINVAL;
                *freq = clk_table->VcnClocks[dpm_level].vclk;
                break;
        case SMU_DCLK:
                if (dpm_level >= clk_table->VcnClkLevelsEnabled)
                        return -EINVAL;
                *freq = clk_table->VcnClocks[dpm_level].dclk;
                break;
        case SMU_UCLK:
        case SMU_MCLK:
                if (dpm_level >= clk_table->NumDfPstatesEnabled)
                        return -EINVAL;
                *freq = clk_table->DfPstateTable[dpm_level].memclk;
                break;
        case SMU_FCLK:
                if (dpm_level >= clk_table->NumDfPstatesEnabled)
                        return -EINVAL;
                *freq = clk_table->DfPstateTable[dpm_level].fclk;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

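/*
 * Emit the supported levels of a clock domain into a sysfs buffer, marking
 * the currently running frequency with '*' (legacy metrics layout).
 */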
static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
                        enum smu_clk_type clk_type, char *buf)
{
        DpmClocks_t *clk_table = smu->smu_table.clocks_table;
        SmuMetrics_legacy_t metrics;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        int i, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
        bool cur_value_match_level = false;

        memset(&metrics, 0, sizeof(metrics));

        ret = smu_cmn_get_metrics_table(smu, &metrics, false);
        if (ret)
                return ret;

        smu_cmn_get_sysfs_buf(&buf, &size);

        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
                        size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                                (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                                (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
                }
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
                        size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                                (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                                (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
                }
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
                        size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                        size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                        size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
                                smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
                }
                break;
        case SMU_SOCCLK:
                /* the level 3 ~ 6 of socclk use the same frequency for vangogh */
                count = clk_table->NumSocClkLevelsEnabled;
                cur_value = metrics.SocclkFrequency;
                break;
        case SMU_VCLK:
                count = clk_table->VcnClkLevelsEnabled;
                cur_value = metrics.VclkFrequency;
                break;
        case SMU_DCLK:
                count = clk_table->VcnClkLevelsEnabled;
                cur_value = metrics.DclkFrequency;
                break;
        case SMU_MCLK:
                count = clk_table->NumDfPstatesEnabled;
                cur_value = metrics.MemclkFrequency;
                break;
        case SMU_FCLK:
                count = clk_table->NumDfPstatesEnabled;
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
                if (ret)
                        return ret;
                break;
        default:
                break;
        }

        switch (clk_type) {
        case SMU_SOCCLK:
        case SMU_VCLK:
        case SMU_DCLK:
        case SMU_MCLK:
        case SMU_FCLK:
                for (i = 0; i < count; i++) {
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
                        if (ret)
                                return ret;
                        if (!value)
                                continue;
                        size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
                                        cur_value == value ? "*" : "");
                        if (cur_value == value)
                                cur_value_match_level = true;
                }

                if (!cur_value_match_level)
                        size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
                break;
        default:
                break;
        }

        return size;
}

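/* Same as above, but for firmware using the current metrics layout. */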
static int vangogh_print_clk_levels(struct smu_context *smu,
                        enum smu_clk_type clk_type, char *buf)
{
        DpmClocks_t *clk_table = smu->smu_table.clocks_table;
        SmuMetrics_t metrics;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        int i, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
        bool cur_value_match_level = false;
        uint32_t min, max;

        memset(&metrics, 0, sizeof(metrics));

        ret = smu_cmn_get_metrics_table(smu, &metrics, false);
        if (ret)
                return ret;

        smu_cmn_get_sysfs_buf(&buf, &size);

        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
                        size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                                (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                                (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
                }
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
                        size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                                (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                                (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
                }
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
                        size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                        size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                        size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
                                smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
                }
                break;
        case SMU_SOCCLK:
                /* the level 3 ~ 6 of socclk use the same frequency for vangogh */
                count = clk_table->NumSocClkLevelsEnabled;
                cur_value = metrics.Current.SocclkFrequency;
                break;
        case SMU_VCLK:
                count = clk_table->VcnClkLevelsEnabled;
                cur_value = metrics.Current.VclkFrequency;
                break;
        case SMU_DCLK:
                count = clk_table->VcnClkLevelsEnabled;
                cur_value = metrics.Current.DclkFrequency;
                break;
        case SMU_MCLK:
                count = clk_table->NumDfPstatesEnabled;
                cur_value = metrics.Current.MemclkFrequency;
                break;
        case SMU_FCLK:
                count = clk_table->NumDfPstatesEnabled;
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
                if (ret)
                        return ret;
                break;
        case SMU_GFXCLK:
        case SMU_SCLK:
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
                if (ret)
                        return ret;
                break;
        default:
                break;
        }

        switch (clk_type) {
        case SMU_SOCCLK:
        case SMU_VCLK:
        case SMU_DCLK:
        case SMU_MCLK:
        case SMU_FCLK:
                for (i = 0; i < count; i++) {
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
                        if (ret)
                                return ret;
                        if (!value)
                                continue;
                        size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
                                        cur_value == value ? "*" : "");
                        if (cur_value == value)
                                cur_value_match_level = true;
                }

                if (!cur_value_match_level)
                        size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
                break;
        case SMU_GFXCLK:
        case SMU_SCLK:
                min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
                max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
                if (cur_value == max)
                        i = 2;
                else if (cur_value == min)
                        i = 0;
                else
                        i = 1;
                size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
                                i == 0 ? "*" : "");
                size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
                                i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
                                i == 1 ? "*" : "");
                size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
                                i == 2 ? "*" : "");
                break;
        default:
                break;
        }

        return size;
}

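/* Dispatch clock level printing based on the SMU interface version. */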
static int vangogh_common_print_clk_levels(struct smu_context *smu,
                        enum smu_clk_type clk_type, char *buf)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t if_version;
        int ret = 0;

        ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
        if (ret) {
                dev_err(adev->dev, "Failed to get smu if version!\n");
                return ret;
        }

        if (if_version < 0x3)
                ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
        else
                ret = vangogh_print_clk_levels(smu, clk_type, buf);

        return ret;
}

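/*
 * Translate a forced performance level into per-domain DPM level indices
 * used by the profiling (min/peak/standard) modes.
 */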
static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
                                         enum amd_dpm_forced_level level,
                                         uint32_t *vclk_mask,
                                         uint32_t *dclk_mask,
                                         uint32_t *mclk_mask,
                                         uint32_t *fclk_mask,
                                         uint32_t *soc_mask)
{
        DpmClocks_t *clk_table = smu->smu_table.clocks_table;

        if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
                if (mclk_mask)
                        *mclk_mask = clk_table->NumDfPstatesEnabled - 1;

                if (fclk_mask)
                        *fclk_mask = clk_table->NumDfPstatesEnabled - 1;

                if (soc_mask)
                        *soc_mask = 0;
        } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
                if (mclk_mask)
                        *mclk_mask = 0;

                if (fclk_mask)
                        *fclk_mask = 0;

                if (soc_mask)
                        *soc_mask = 1;

                if (vclk_mask)
                        *vclk_mask = 1;

                if (dclk_mask)
                        *dclk_mask = 1;
        } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
                if (mclk_mask)
                        *mclk_mask = 0;

                if (fclk_mask)
                        *fclk_mask = 0;

                if (soc_mask)
                        *soc_mask = 1;

                if (vclk_mask)
                        *vclk_mask = 1;

                if (dclk_mask)
                        *dclk_mask = 1;
        }

        return 0;
}

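/*
 * Check whether the DPM feature owning the given clock domain is enabled;
 * unknown clock types are treated as always enabled.
 */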
static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
                                enum smu_clk_type clk_type)
{
        enum smu_feature_mask feature_id = 0;

        switch (clk_type) {
        case SMU_MCLK:
        case SMU_UCLK:
        case SMU_FCLK:
                feature_id = SMU_FEATURE_DPM_FCLK_BIT;
                break;
        case SMU_GFXCLK:
        case SMU_SCLK:
                feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
                break;
        case SMU_SOCCLK:
                feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
                break;
        case SMU_VCLK:
        case SMU_DCLK:
                feature_id = SMU_FEATURE_VCN_DPM_BIT;
                break;
        default:
                return true;
        }

        if (!smu_cmn_feature_is_enabled(smu, feature_id))
                return false;

        return true;
}

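/*
 * Return the lowest and highest attainable frequency of a clock domain.
 * If its DPM feature is disabled, fall back to the boot clock value
 * (converted to MHz).
 */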
static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
                                        enum smu_clk_type clk_type,
                                        uint32_t *min,
                                        uint32_t *max)
{
        int ret = 0;
        uint32_t soc_mask;
        uint32_t vclk_mask;
        uint32_t dclk_mask;
        uint32_t mclk_mask;
        uint32_t fclk_mask;
        uint32_t clock_limit;

        if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
                switch (clk_type) {
                case SMU_MCLK:
                case SMU_UCLK:
                        clock_limit = smu->smu_table.boot_values.uclk;
                        break;
                case SMU_FCLK:
                        clock_limit = smu->smu_table.boot_values.fclk;
                        break;
                case SMU_GFXCLK:
                case SMU_SCLK:
                        clock_limit = smu->smu_table.boot_values.gfxclk;
                        break;
                case SMU_SOCCLK:
                        clock_limit = smu->smu_table.boot_values.socclk;
                        break;
                case SMU_VCLK:
                        clock_limit = smu->smu_table.boot_values.vclk;
                        break;
                case SMU_DCLK:
                        clock_limit = smu->smu_table.boot_values.dclk;
                        break;
                default:
                        clock_limit = 0;
                        break;
                }

                /* clock in Mhz unit */
                if (min)
                        *min = clock_limit / 100;
                if (max)
                        *max = clock_limit / 100;

                return 0;
        }
        if (max) {
                ret = vangogh_get_profiling_clk_mask(smu,
                                                        AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
                                                        &vclk_mask,
                                                        &dclk_mask,
                                                        &mclk_mask,
                                                        &fclk_mask,
                                                        &soc_mask);
                if (ret)
                        goto failed;

                switch (clk_type) {
                case SMU_UCLK:
                case SMU_MCLK:
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
                        if (ret)
                                goto failed;
                        break;
                case SMU_SOCCLK:
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
                        if (ret)
                                goto failed;
                        break;
                case SMU_FCLK:
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
                        if (ret)
                                goto failed;
                        break;
                case SMU_VCLK:
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
                        if (ret)
                                goto failed;
                        break;
                case SMU_DCLK:
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
                        if (ret)
                                goto failed;
                        break;
                default:
                        ret = -EINVAL;
                        goto failed;
                }
        }
        if (min) {
                switch (clk_type) {
                case SMU_UCLK:
                case SMU_MCLK:
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
                        if (ret)
                                goto failed;
                        break;
                case SMU_SOCCLK:
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
                        if (ret)
                                goto failed;
                        break;
                case SMU_FCLK:
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
                        if (ret)
                                goto failed;
                        break;
                case SMU_VCLK:
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
                        if (ret)
                                goto failed;
                        break;
                case SMU_DCLK:
                        ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
                        if (ret)
                                goto failed;
                        break;
                default:
                        ret = -EINVAL;
                        goto failed;
                }
        }
failed:
        return ret;
}

static int vangogh_get_power_profile_mode(struct smu_context *smu,
                                           char *buf)
{
        uint32_t i, size = 0;
        int16_t workload_type = 0;

        if (!buf)
                return -EINVAL;

        for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
                /*
                 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
                 * Not all profile modes are supported on vangogh.
                 */
                workload_type = smu_cmn_to_asic_specific_index(smu,
                                                               CMN2ASIC_MAPPING_WORKLOAD,
                                                               i);

                if (workload_type < 0)
                        continue;

                size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
                        i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
        }

        return size;
}

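/*
 * Map the requested PP_SMC_POWER_PROFILE_* mode to its WORKLOAD_PPLIB_*_BIT
 * and notify the firmware via ActiveProcessNotify.
 */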
static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
{
        int workload_type, ret;
        uint32_t profile_mode = input[size];

        if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
                dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
                return -EINVAL;
        }

        if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
                        profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
                return 0;

        /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
        workload_type = smu_cmn_to_asic_specific_index(smu,
                                                       CMN2ASIC_MAPPING_WORKLOAD,
                                                       profile_mode);
        if (workload_type < 0) {
                dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
                                        profile_mode);
                return -EINVAL;
        }

        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
                                    1 << workload_type,
                                    NULL);
        if (ret) {
                dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
                                        workload_type);
                return ret;
        }

        smu->power_profile_mode = profile_mode;

        return 0;
}

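/*
 * Clamp a clock domain to [min, max] by sending the matching hard-min and
 * soft-max messages; VCLK limits are passed in the upper 16 bits of the
 * message argument.
 */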
static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
                                          enum smu_clk_type clk_type,
                                          uint32_t min,
                                          uint32_t max)
{
        int ret = 0;

        if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
                return 0;

        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                        SMU_MSG_SetHardMinGfxClk,
                                                        min, NULL);
                if (ret)
                        return ret;

                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                        SMU_MSG_SetSoftMaxGfxClk,
                                                        max, NULL);
                if (ret)
                        return ret;
                break;
        case SMU_FCLK:
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                        SMU_MSG_SetHardMinFclkByFreq,
                                                        min, NULL);
                if (ret)
                        return ret;

                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                        SMU_MSG_SetSoftMaxFclkByFreq,
                                                        max, NULL);
                if (ret)
                        return ret;
                break;
        case SMU_SOCCLK:
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                        SMU_MSG_SetHardMinSocclkByFreq,
                                                        min, NULL);
                if (ret)
                        return ret;

                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                        SMU_MSG_SetSoftMaxSocclkByFreq,
                                                        max, NULL);
                if (ret)
                        return ret;
                break;
        case SMU_VCLK:
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                        SMU_MSG_SetHardMinVcn,
                                                        min << 16, NULL);
                if (ret)
                        return ret;
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                        SMU_MSG_SetSoftMaxVcn,
                                                        max << 16, NULL);
                if (ret)
                        return ret;
                break;
        case SMU_DCLK:
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                        SMU_MSG_SetHardMinVcn,
                                                        min, NULL);
                if (ret)
                        return ret;
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                        SMU_MSG_SetSoftMaxVcn,
                                                        max, NULL);
                if (ret)
                        return ret;
                break;
        default:
                return -EINVAL;
        }

        return ret;
}

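/*
 * Force a clock domain to the range selected by a level bitmask: the lowest
 * set bit picks the minimum level and the highest set bit the maximum.
 */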
1171 static int vangogh_force_clk_levels(struct smu_context *smu,
1172                                    enum smu_clk_type clk_type, uint32_t mask)
1173 {
1174         uint32_t soft_min_level = 0, soft_max_level = 0;
1175         uint32_t min_freq = 0, max_freq = 0;
1176         int ret = 0 ;
1177
1178         soft_min_level = mask ? (ffs(mask) - 1) : 0;
1179         soft_max_level = mask ? (fls(mask) - 1) : 0;
1180
1181         switch (clk_type) {
1182         case SMU_SOCCLK:
1183                 ret = vangogh_get_dpm_clk_limited(smu, clk_type,
1184                                                 soft_min_level, &min_freq);
1185                 if (ret)
1186                         return ret;
1187                 ret = vangogh_get_dpm_clk_limited(smu, clk_type,
1188                                                 soft_max_level, &max_freq);
1189                 if (ret)
1190                         return ret;
1191                 ret = smu_cmn_send_smc_msg_with_param(smu,
1192                                                                 SMU_MSG_SetSoftMaxSocclkByFreq,
1193                                                                 max_freq, NULL);
1194                 if (ret)
1195                         return ret;
1196                 ret = smu_cmn_send_smc_msg_with_param(smu,
1197                                                                 SMU_MSG_SetHardMinSocclkByFreq,
1198                                                                 min_freq, NULL);
1199                 if (ret)
1200                         return ret;
1201                 break;
1202         case SMU_FCLK:
1203                 ret = vangogh_get_dpm_clk_limited(smu,
1204                                                         clk_type, soft_min_level, &min_freq);
1205                 if (ret)
1206                         return ret;
1207                 ret = vangogh_get_dpm_clk_limited(smu,
1208                                                         clk_type, soft_max_level, &max_freq);
1209                 if (ret)
1210                         return ret;
1211                 ret = smu_cmn_send_smc_msg_with_param(smu,
1212                                                                 SMU_MSG_SetSoftMaxFclkByFreq,
1213                                                                 max_freq, NULL);
1214                 if (ret)
1215                         return ret;
1216                 ret = smu_cmn_send_smc_msg_with_param(smu,
1217                                                                 SMU_MSG_SetHardMinFclkByFreq,
1218                                                                 min_freq, NULL);
1219                 if (ret)
1220                         return ret;
1221                 break;
1222         case SMU_VCLK:
1223                 ret = vangogh_get_dpm_clk_limited(smu,
1224                                                         clk_type, soft_min_level, &min_freq);
1225                 if (ret)
1226                         return ret;
1227
1228                 ret = vangogh_get_dpm_clk_limited(smu,
1229                                                         clk_type, soft_max_level, &max_freq);
1230                 if (ret)
1231                         return ret;
1232
1233
1234                 ret = smu_cmn_send_smc_msg_with_param(smu,
1235                                                                 SMU_MSG_SetHardMinVcn,
1236                                                                 min_freq << 16, NULL);
1237                 if (ret)
1238                         return ret;
1239
1240                 ret = smu_cmn_send_smc_msg_with_param(smu,
1241                                                                 SMU_MSG_SetSoftMaxVcn,
1242                                                                 max_freq << 16, NULL);
1243                 if (ret)
1244                         return ret;
1245
1246                 break;
1247         case SMU_DCLK:
1248                 ret = vangogh_get_dpm_clk_limited(smu,
1249                                                         clk_type, soft_min_level, &min_freq);
1250                 if (ret)
1251                         return ret;
1252
1253                 ret = vangogh_get_dpm_clk_limited(smu,
1254                                                         clk_type, soft_max_level, &max_freq);
1255                 if (ret)
1256                         return ret;
1257
1258                 ret = smu_cmn_send_smc_msg_with_param(smu,
1259                                                         SMU_MSG_SetHardMinVcn,
1260                                                         min_freq, NULL);
1261                 if (ret)
1262                         return ret;
1263
1264                 ret = smu_cmn_send_smc_msg_with_param(smu,
1265                                                         SMU_MSG_SetSoftMaxVcn,
1266                                                         max_freq, NULL);
1267                 if (ret)
1268                         return ret;
1269
1270                 break;
1271         default:
1272                 break;
1273         }
1274
1275         return ret;
1276 }
1277
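     /*
      * Force every DPM-managed clock (SOCCLK, VCLK, DCLK, FCLK) to a single
      * level: the highest supported frequency when @highest is true, the
      * lowest otherwise, by collapsing the soft min/max range to one value.
      */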
1278 static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
1279 {
1280         int ret = 0, i = 0;
1281         uint32_t min_freq, max_freq, force_freq;
1282         enum smu_clk_type clk_type;
1283
1284         enum smu_clk_type clks[] = {
1285                 SMU_SOCCLK,
1286                 SMU_VCLK,
1287                 SMU_DCLK,
1288                 SMU_FCLK,
1289         };
1290
1291         for (i = 0; i < ARRAY_SIZE(clks); i++) {
1292                 clk_type = clks[i];
1293                 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
1294                 if (ret)
1295                         return ret;
1296
1297                 force_freq = highest ? max_freq : min_freq;
1298                 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
1299                 if (ret)
1300                         return ret;
1301         }
1302
1303         return ret;
1304 }
1305
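     /*
      * Undo any forced clock levels: for each clock whose DPM feature is
      * enabled, restore the soft limits to the full [min, max] range
      * reported by vangogh_get_dpm_ultimate_freq().
      */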
1306 static int vangogh_unforce_dpm_levels(struct smu_context *smu)
1307 {
1308         int ret = 0, i = 0;
1309         uint32_t min_freq, max_freq;
1310         enum smu_clk_type clk_type;
1311
1312         struct clk_feature_map {
1313                 enum smu_clk_type clk_type;
1314                 uint32_t        feature;
1315         } clk_feature_map[] = {
1316                 {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
1317                 {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
1318                 {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
1319                 {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT},
1320         };
1321
1322         for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
1323
1324                 if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
1325                         continue;
1326
1327                 clk_type = clk_feature_map[i].clk_type;
1328
1329                 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
1330
1331                 if (ret)
1332                         return ret;
1333
1334                 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
1335
1336                 if (ret)
1337                         return ret;
1338         }
1339
1340         return ret;
1341 }
1342
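     /*
      * Pin FCLK, SOCCLK, VCLK and DCLK to their peak frequencies; used for
      * the PROFILE_PEAK performance level below.
      */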
1343 static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
1344 {
1345         int ret = 0;
1346         uint32_t socclk_freq = 0, fclk_freq = 0;
1347         uint32_t vclk_freq = 0, dclk_freq = 0;
1348
1349         ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
1350         if (ret)
1351                 return ret;
1352
1353         ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
1354         if (ret)
1355                 return ret;
1356
1357         ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
1358         if (ret)
1359                 return ret;
1360
1361         ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
1362         if (ret)
1363                 return ret;
1364
1365         ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
1366         if (ret)
1367                 return ret;
1368
1369         ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
1370         if (ret)
1371                 return ret;
1372
1373         ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
1374         if (ret)
1375                 return ret;
1376
1377         ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
1378         if (ret)
1379                 return ret;
1380
1381         return ret;
1382 }
1383
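     /*
      * Apply a forced performance level: program the per-clock limits that
      * correspond to the requested level, then push the resulting GFXCLK
      * hard min / soft max and, on newer PMFW, the per-core CCLK soft
      * limits down to the SMU.
      */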
1384 static int vangogh_set_performance_level(struct smu_context *smu,
1385                                         enum amd_dpm_forced_level level)
1386 {
1387         int ret = 0, i;
1388         uint32_t soc_mask, mclk_mask, fclk_mask;
1389         uint32_t vclk_mask = 0, dclk_mask = 0;
1390
1391         smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
1392         smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
1393
1394         switch (level) {
1395         case AMD_DPM_FORCED_LEVEL_HIGH:
1396                 smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq;
1397                 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1398
1399
1400                 ret = vangogh_force_dpm_limit_value(smu, true);
1401                 if (ret)
1402                         return ret;
1403                 break;
1404         case AMD_DPM_FORCED_LEVEL_LOW:
1405                 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1406                 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
1407
1408                 ret = vangogh_force_dpm_limit_value(smu, false);
1409                 if (ret)
1410                         return ret;
1411                 break;
1412         case AMD_DPM_FORCED_LEVEL_AUTO:
1413                 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1414                 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1415
1416                 ret = vangogh_unforce_dpm_levels(smu);
1417                 if (ret)
1418                         return ret;
1419                 break;
1420         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1421                 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
1422                 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
1423
1424                 ret = vangogh_get_profiling_clk_mask(smu, level,
1425                                                         &vclk_mask,
1426                                                         &dclk_mask,
1427                                                         &mclk_mask,
1428                                                         &fclk_mask,
1429                                                         &soc_mask);
1430                 if (ret)
1431                         return ret;
1432
1433                 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
1434                 vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
1435                 vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
1436                 vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
1437                 break;
1438         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1439                 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1440                 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
1441                 break;
1442         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1443                 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1444                 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1445
1446                 ret = vangogh_get_profiling_clk_mask(smu, level,
1447                                                         NULL,
1448                                                         NULL,
1449                                                         &mclk_mask,
1450                                                         &fclk_mask,
1451                                                         NULL);
1452                 if (ret)
1453                         return ret;
1454
1455                 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
1456                 break;
1457         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1458                 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
1459                 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
1460
1461                 ret = vangogh_set_peak_clock_by_device(smu);
1462                 if (ret)
1463                         return ret;
1464                 break;
1465         case AMD_DPM_FORCED_LEVEL_MANUAL:
1466         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1467         default:
1468                 return 0;
1469         }
1470
1471         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
1472                                               smu->gfx_actual_hard_min_freq, NULL);
1473         if (ret)
1474                 return ret;
1475
1476         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
1477                                               smu->gfx_actual_soft_max_freq, NULL);
1478         if (ret)
1479                 return ret;
1480
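             /*
              * Per-core CCLK soft limits are only supported on PMFW
              * 0x43f1b00 and newer; the core index is packed into the
              * upper bits of the message parameter, the frequency in MHz
              * into the lower bits.
              */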
1481         if (smu->adev->pm.fw_version >= 0x43f1b00) {
1482                 for (i = 0; i < smu->cpu_core_num; i++) {
1483                         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
1484                                                               ((i << 20)
1485                                                                | smu->cpu_actual_soft_min_freq),
1486                                                               NULL);
1487                         if (ret)
1488                                 return ret;
1489
1490                         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
1491                                                               ((i << 20)
1492                                                                | smu->cpu_actual_soft_max_freq),
1493                                                               NULL);
1494                         if (ret)
1495                                 return ret;
1496                 }
1497         }
1498
1499         return ret;
1500 }
1501
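     /*
      * Service amd_pp_sensors queries from the SMU metrics table. The clock
      * sensors (GFX_SCLK/GFX_MCLK) are reported by the firmware in MHz and
      * scaled by 100 here, matching the 10 kHz units used by the sensor
      * interface.
      */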
1502 static int vangogh_read_sensor(struct smu_context *smu,
1503                                  enum amd_pp_sensors sensor,
1504                                  void *data, uint32_t *size)
1505 {
1506         int ret = 0;
1507
1508         if (!data || !size)
1509                 return -EINVAL;
1510
1511         switch (sensor) {
1512         case AMDGPU_PP_SENSOR_GPU_LOAD:
1513                 ret = vangogh_common_get_smu_metrics_data(smu,
1514                                                    METRICS_AVERAGE_GFXACTIVITY,
1515                                                    (uint32_t *)data);
1516                 *size = 4;
1517                 break;
1518         case AMDGPU_PP_SENSOR_GPU_POWER:
1519                 ret = vangogh_common_get_smu_metrics_data(smu,
1520                                                    METRICS_AVERAGE_SOCKETPOWER,
1521                                                    (uint32_t *)data);
1522                 *size = 4;
1523                 break;
1524         case AMDGPU_PP_SENSOR_EDGE_TEMP:
1525                 ret = vangogh_common_get_smu_metrics_data(smu,
1526                                                    METRICS_TEMPERATURE_EDGE,
1527                                                    (uint32_t *)data);
1528                 *size = 4;
1529                 break;
1530         case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1531                 ret = vangogh_common_get_smu_metrics_data(smu,
1532                                                    METRICS_TEMPERATURE_HOTSPOT,
1533                                                    (uint32_t *)data);
1534                 *size = 4;
1535                 break;
1536         case AMDGPU_PP_SENSOR_GFX_MCLK:
1537                 ret = vangogh_common_get_smu_metrics_data(smu,
1538                                                    METRICS_CURR_UCLK,
1539                                                    (uint32_t *)data);
1540                 *(uint32_t *)data *= 100;
1541                 *size = 4;
1542                 break;
1543         case AMDGPU_PP_SENSOR_GFX_SCLK:
1544                 ret = vangogh_common_get_smu_metrics_data(smu,
1545                                                    METRICS_CURR_GFXCLK,
1546                                                    (uint32_t *)data);
1547                 *(uint32_t *)data *= 100;
1548                 *size = 4;
1549                 break;
1550         case AMDGPU_PP_SENSOR_VDDGFX:
1551                 ret = vangogh_common_get_smu_metrics_data(smu,
1552                                                    METRICS_VOLTAGE_VDDGFX,
1553                                                    (uint32_t *)data);
1554                 *size = 4;
1555                 break;
1556         case AMDGPU_PP_SENSOR_VDDNB:
1557                 ret = vangogh_common_get_smu_metrics_data(smu,
1558                                                    METRICS_VOLTAGE_VDDSOC,
1559                                                    (uint32_t *)data);
1560                 *size = 4;
1561                 break;
1562         case AMDGPU_PP_SENSOR_CPU_CLK:
1563                 ret = vangogh_common_get_smu_metrics_data(smu,
1564                                                    METRICS_AVERAGE_CPUCLK,
1565                                                    (uint32_t *)data);
1566                 *size = smu->cpu_core_num * sizeof(uint16_t);
1567                 break;
1568         default:
1569                 ret = -EOPNOTSUPP;
1570                 break;
1571         }
1572
1573         return ret;
1574 }
1575
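     /*
      * Copy the watermark ranges handed in through the pp_smu interface
      * into the SMU Watermarks_t table (reader ranges fill the DCFCLK rows,
      * writer ranges the SOCCLK rows) and upload the table once, tracked
      * via the WATERMARKS_EXIST/WATERMARKS_LOADED bits.
      */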
1576 static int vangogh_set_watermarks_table(struct smu_context *smu,
1577                                        struct pp_smu_wm_range_sets *clock_ranges)
1578 {
1579         int i;
1580         int ret = 0;
1581         Watermarks_t *table = smu->smu_table.watermarks_table;
1582
1583         if (!table || !clock_ranges)
1584                 return -EINVAL;
1585
1586         if (clock_ranges) {
1587                 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
1588                         clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
1589                         return -EINVAL;
1590
1591                 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
1592                         table->WatermarkRow[WM_DCFCLK][i].MinClock =
1593                                 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
1594                         table->WatermarkRow[WM_DCFCLK][i].MaxClock =
1595                                 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
1596                         table->WatermarkRow[WM_DCFCLK][i].MinMclk =
1597                                 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
1598                         table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
1599                                 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
1600
1601                         table->WatermarkRow[WM_DCFCLK][i].WmSetting =
1602                                 clock_ranges->reader_wm_sets[i].wm_inst;
1603                 }
1604
1605                 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
1606                         table->WatermarkRow[WM_SOCCLK][i].MinClock =
1607                                 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
1608                         table->WatermarkRow[WM_SOCCLK][i].MaxClock =
1609                                 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
1610                         table->WatermarkRow[WM_SOCCLK][i].MinMclk =
1611                                 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
1612                         table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
1613                                 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
1614
1615                         table->WatermarkRow[WM_SOCCLK][i].WmSetting =
1616                                 clock_ranges->writer_wm_sets[i].wm_inst;
1617                 }
1618
1619                 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1620         }
1621
1622         /* pass data to smu controller */
1623         if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1624              !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1625                 ret = smu_cmn_write_watermarks_table(smu);
1626                 if (ret) {
1627                         dev_err(smu->adev->dev, "Failed to update WMTABLE!");
1628                         return ret;
1629                 }
1630                 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1631         }
1632
1633         return 0;
1634 }
1635
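     /*
      * Fill a gpu_metrics_v2_2 record from the legacy, flat
      * SmuMetrics_legacy_t layout. Selected by
      * vangogh_common_get_gpu_metrics() for SMU interface versions below 3.
      */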
1636 static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
1637                                       void **table)
1638 {
1639         struct smu_table_context *smu_table = &smu->smu_table;
1640         struct gpu_metrics_v2_2 *gpu_metrics =
1641                 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
1642         SmuMetrics_legacy_t metrics;
1643         int ret = 0;
1644
1645         ret = smu_cmn_get_metrics_table(smu, &metrics, true);
1646         if (ret)
1647                 return ret;
1648
1649         smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
1650
1651         gpu_metrics->temperature_gfx = metrics.GfxTemperature;
1652         gpu_metrics->temperature_soc = metrics.SocTemperature;
1653         memcpy(&gpu_metrics->temperature_core[0],
1654                 &metrics.CoreTemperature[0],
1655                 sizeof(uint16_t) * 4);
1656         gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
1657
1658         gpu_metrics->average_gfx_activity = metrics.GfxActivity;
1659         gpu_metrics->average_mm_activity = metrics.UvdActivity;
1660
1661         gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
1662         gpu_metrics->average_cpu_power = metrics.Power[0];
1663         gpu_metrics->average_soc_power = metrics.Power[1];
1664         gpu_metrics->average_gfx_power = metrics.Power[2];
1665         memcpy(&gpu_metrics->average_core_power[0],
1666                 &metrics.CorePower[0],
1667                 sizeof(uint16_t) * 4);
1668
1669         gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
1670         gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
1671         gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
1672         gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
1673         gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
1674         gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
1675
1676         memcpy(&gpu_metrics->current_coreclk[0],
1677                 &metrics.CoreFrequency[0],
1678                 sizeof(uint16_t) * 4);
1679         gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];
1680
1681         gpu_metrics->throttle_status = metrics.ThrottlerStatus;
1682         gpu_metrics->indep_throttle_status =
1683                         smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
1684                                                            vangogh_throttler_map);
1685
1686         gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
1687
1688         *table = (void *)gpu_metrics;
1689
1690         return sizeof(struct gpu_metrics_v2_2);
1691 }
1692
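     /*
      * Fill a gpu_metrics_v2_2 record from the current SmuMetrics_t layout,
      * which splits the samples into Current and Average sub-structures.
      */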
1693 static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
1694                                       void **table)
1695 {
1696         struct smu_table_context *smu_table = &smu->smu_table;
1697         struct gpu_metrics_v2_2 *gpu_metrics =
1698                 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
1699         SmuMetrics_t metrics;
1700         int ret = 0;
1701
1702         ret = smu_cmn_get_metrics_table(smu, &metrics, true);
1703         if (ret)
1704                 return ret;
1705
1706         smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
1707
1708         gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
1709         gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
1710         memcpy(&gpu_metrics->temperature_core[0],
1711                 &metrics.Current.CoreTemperature[0],
1712                 sizeof(uint16_t) * 4);
1713         gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];
1714
1715         gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
1716         gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;
1717
1718         gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
1719         gpu_metrics->average_cpu_power = metrics.Current.Power[0];
1720         gpu_metrics->average_soc_power = metrics.Current.Power[1];
1721         gpu_metrics->average_gfx_power = metrics.Current.Power[2];
1722         memcpy(&gpu_metrics->average_core_power[0],
1723                 &metrics.Average.CorePower[0],
1724                 sizeof(uint16_t) * 4);
1725
1726         gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
1727         gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
1728         gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
1729         gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
1730         gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
1731         gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
1732
1733         gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
1734         gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
1735         gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
1736         gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
1737         gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
1738         gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
1739
1740         memcpy(&gpu_metrics->current_coreclk[0],
1741                 &metrics.Current.CoreFrequency[0],
1742                 sizeof(uint16_t) * 4);
1743         gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];
1744
1745         gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
1746         gpu_metrics->indep_throttle_status =
1747                         smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
1748                                                            vangogh_throttler_map);
1749
1750         gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
1751
1752         *table = (void *)gpu_metrics;
1753
1754         return sizeof(struct gpu_metrics_v2_2);
1755 }
1756
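     /*
      * Pick the metrics decoder that matches the firmware: the legacy
      * layout for SMU interface versions below 3, the current layout
      * otherwise.
      */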
1757 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
1758                                       void **table)
1759 {
1760         struct amdgpu_device *adev = smu->adev;
1761         uint32_t if_version;
1762         int ret = 0;
1763
1764         ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
1765         if (ret) {
1766                 dev_err(adev->dev, "Failed to get smu if version!\n");
1767                 return ret;
1768         }
1769
1770         if (if_version < 0x3)
1771                 ret = vangogh_get_legacy_gpu_metrics(smu, table);
1772         else
1773                 ret = vangogh_get_gpu_metrics(smu, table);
1774
1775         return ret;
1776 }
1777
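     /*
      * Handle pp_od_clk_voltage edits, which are only accepted while the
      * performance level is forced to manual: per-core CCLK min/max
      * (PP_OD_EDIT_CCLK_VDDC_TABLE), GFXCLK min/max
      * (PP_OD_EDIT_SCLK_VDDC_TABLE), restoring the defaults, and the final
      * commit that sends the staged limits to the SMU.
      */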
1778 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
1779                                         long input[], uint32_t size)
1780 {
1781         int ret = 0;
1782         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1783
1784         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1785                 dev_warn(smu->adev->dev,
1786                         "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
1787                 return -EINVAL;
1788         }
1789
1790         switch (type) {
1791         case PP_OD_EDIT_CCLK_VDDC_TABLE:
1792                 if (size != 3) {
1793                         dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n");
1794                         return -EINVAL;
1795                 }
1796                 if (input[0] >= smu->cpu_core_num) {
1797                         dev_err(smu->adev->dev, "core index is out of range, should be less than %d\n",
1798                                 smu->cpu_core_num);
                             return -EINVAL;
1799                 }
1800                 smu->cpu_core_id_select = input[0];
1801                 if (input[1] == 0) {
1802                         if (input[2] < smu->cpu_default_soft_min_freq) {
1803                                 dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
1804                                         input[2], smu->cpu_default_soft_min_freq);
1805                                 return -EINVAL;
1806                         }
1807                         smu->cpu_actual_soft_min_freq = input[2];
1808                 } else if (input[1] == 1) {
1809                         if (input[2] > smu->cpu_default_soft_max_freq) {
1810                                 dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
1811                                         input[2], smu->cpu_default_soft_max_freq);
1812                                 return -EINVAL;
1813                         }
1814                         smu->cpu_actual_soft_max_freq = input[2];
1815                 } else {
1816                         return -EINVAL;
1817                 }
1818                 break;
1819         case PP_OD_EDIT_SCLK_VDDC_TABLE:
1820                 if (size != 2) {
1821                         dev_err(smu->adev->dev, "Input parameter number not correct\n");
1822                         return -EINVAL;
1823                 }
1824
1825                 if (input[0] == 0) {
1826                         if (input[1] < smu->gfx_default_hard_min_freq) {
1827                                 dev_warn(smu->adev->dev,
1828                                         "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
1829                                         input[1], smu->gfx_default_hard_min_freq);
1830                                 return -EINVAL;
1831                         }
1832                         smu->gfx_actual_hard_min_freq = input[1];
1833                 } else if (input[0] == 1) {
1834                         if (input[1] > smu->gfx_default_soft_max_freq) {
1835                                 dev_warn(smu->adev->dev,
1836                                         "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
1837                                         input[1], smu->gfx_default_soft_max_freq);
1838                                 return -EINVAL;
1839                         }
1840                         smu->gfx_actual_soft_max_freq = input[1];
1841                 } else {
1842                         return -EINVAL;
1843                 }
1844                 break;
1845         case PP_OD_RESTORE_DEFAULT_TABLE:
1846                 if (size != 0) {
1847                         dev_err(smu->adev->dev, "Input parameter number not correct\n");
1848                         return -EINVAL;
1849                 } else {
1850                         smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1851                         smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1852                         smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
1853                         smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
1854                 }
1855                 break;
1856         case PP_OD_COMMIT_DPM_TABLE:
1857                 if (size != 0) {
1858                         dev_err(smu->adev->dev, "Input parameter number not correct\n");
1859                         return -EINVAL;
1860                 } else {
1861                         if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
1862                                 dev_err(smu->adev->dev,
1863                                         "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
1864                                         smu->gfx_actual_hard_min_freq,
1865                                         smu->gfx_actual_soft_max_freq);
1866                                 return -EINVAL;
1867                         }
1868
1869                         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
1870                                                                         smu->gfx_actual_hard_min_freq, NULL);
1871                         if (ret) {
1872                                 dev_err(smu->adev->dev, "Set hard min sclk failed!");
1873                                 return ret;
1874                         }
1875
1876                         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
1877                                                                         smu->gfx_actual_soft_max_freq, NULL);
1878                         if (ret) {
1879                                 dev_err(smu->adev->dev, "Set soft max sclk failed!");
1880                                 return ret;
1881                         }
1882
1883                         if (smu->adev->pm.fw_version < 0x43f1b00) {
1884                                 dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
1885                                 break;
1886                         }
1887
1888                         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
1889                                                               ((smu->cpu_core_id_select << 20)
1890                                                                | smu->cpu_actual_soft_min_freq),
1891                                                               NULL);
1892                         if (ret) {
1893                                 dev_err(smu->adev->dev, "Set soft min cclk failed!");
1894                                 return ret;
1895                         }
1896
1897                         ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
1898                                                               ((smu->cpu_core_id_select << 20)
1899                                                                | smu->cpu_actual_soft_max_freq),
1900                                                               NULL);
1901                         if (ret) {
1902                                 dev_err(smu->adev->dev, "Set soft max cclk failed!");
1903                                 return ret;
1904                         }
1905                 }
1906                 break;
1907         default:
1908                 return -ENOSYS;
1909         }
1910
1911         return ret;
1912 }
1913
1914 static int vangogh_set_default_dpm_tables(struct smu_context *smu)
1915 {
1916         struct smu_table_context *smu_table = &smu->smu_table;
1917
1918         return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
1919 }
1920
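     /*
      * Seed the fine-grain tuning ranges: the GFXCLK limits come from the
      * firmware DpmClocks_t table, while the CCLK soft limits default to a
      * fixed 1400-3500 MHz window.
      */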
1921 static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
1922 {
1923         DpmClocks_t *clk_table = smu->smu_table.clocks_table;
1924
1925         smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
1926         smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
1927         smu->gfx_actual_hard_min_freq = 0;
1928         smu->gfx_actual_soft_max_freq = 0;
1929
1930         smu->cpu_default_soft_min_freq = 1400;
1931         smu->cpu_default_soft_max_freq = 3500;
1932         smu->cpu_actual_soft_min_freq = 0;
1933         smu->cpu_actual_soft_max_freq = 0;
1934
1935         return 0;
1936 }
1937
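     /*
      * Export the SOCCLK, FCLK and MEMCLK DPM levels (frequency plus
      * voltage) from the firmware clock table into the generic
      * struct dpm_clocks format.
      */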
1938 static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
1939 {
1940         DpmClocks_t *table = smu->smu_table.clocks_table;
1941         int i;
1942
1943         if (!clock_table || !table)
1944                 return -EINVAL;
1945
1946         for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
1947                 clock_table->SocClocks[i].Freq = table->SocClocks[i];
1948                 clock_table->SocClocks[i].Vol = table->SocVoltage[i];
1949         }
1950
1951         for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
1952                 clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
1953                 clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
1954         }
1955
1956         for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
1957                 clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
1958                 clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
1959         }
1960
1961         return 0;
1962 }
1963
1964
1965 static int vangogh_system_features_control(struct smu_context *smu, bool en)
1966 {
1967         struct amdgpu_device *adev = smu->adev;
1968         int ret = 0;
1969
1970         if (adev->pm.fw_version >= 0x43f1700 && !en)
1971                 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
1972                                                       RLC_STATUS_OFF, NULL);
1973
1974         return ret;
1975 }
1976
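     /*
      * Late SMU init: enable GFXOFF when GFX DPM and graphics power gating
      * are both available (otherwise clear PP_GFXOFF_MASK), then request
      * powering down unused WGPs as long as that does not drop below the
      * always-on set programmed in RLC_PG_ALWAYS_ON_WGP_MASK.
      */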
1977 static int vangogh_post_smu_init(struct smu_context *smu)
1978 {
1979         struct amdgpu_device *adev = smu->adev;
1980         uint32_t tmp;
1981         int ret = 0;
1982         uint8_t aon_bits = 0;
1983         /* Two CUs in one WGP */
1984         uint32_t req_active_wgps = adev->gfx.cu_info.number/2;
1985         uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
1986                 adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
1987
1988         /* The allow message will be sent after the enable message on Vangogh */
1989         if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
1990                         (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
1991                 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
1992                 if (ret) {
1993                         dev_err(adev->dev, "Failed to Enable GfxOff!\n");
1994                         return ret;
1995                 }
1996         } else {
1997                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1998                 dev_info(adev->dev, "GFX DPM or power gating is disabled, disabling GFXOFF\n");
1999         }
2000
2001         /* if all CUs are active, no need to power off any WGPs */
2002         if (total_cu == adev->gfx.cu_info.number)
2003                 return 0;
2004
2005         /*
2006          * Calculate the total bits number of always on WGPs for all SA/SEs in
2007          * RLC_PG_ALWAYS_ON_WGP_MASK.
2008          */
2009         tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
2010         tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;
2011
2012         aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
2013
2014         /* Do not request any WGPs less than set in the AON_WGP_MASK */
2015         if (aon_bits > req_active_wgps) {
2016                 dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
2017                 return 0;
2018         } else {
2019                 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL);
2020         }
2021 }
2022
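     /*
      * Issue a GfxDeviceDriverReset message of the given type without
      * waiting for a response, then give the firmware a short delay to
      * carry out the reset.
      */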
2023 static int vangogh_mode_reset(struct smu_context *smu, int type)
2024 {
2025         int ret = 0, index = 0;
2026
2027         index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
2028                                                SMU_MSG_GfxDeviceDriverReset);
2029         if (index < 0)
2030                 return index == -EACCES ? 0 : index;
2031
2032         mutex_lock(&smu->message_lock);
2033
2034         ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
2035
2036         mutex_unlock(&smu->message_lock);
2037
2038         mdelay(10);
2039
2040         return ret;
2041 }
2042
2043 static int vangogh_mode2_reset(struct smu_context *smu)
2044 {
2045         return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
2046 }
2047
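     /*
      * Query the slow and fast PPT limits (supported on PMFW 0x43f1e00 and
      * newer). The firmware reports the limits in milliwatts; they are
      * cached here in watts alongside fixed maximums (29 W slow, 30 W fast).
      */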
2048 static int vangogh_get_power_limit(struct smu_context *smu,
2049                                    uint32_t *current_power_limit,
2050                                    uint32_t *default_power_limit,
2051                                    uint32_t *max_power_limit)
2052 {
2053         struct smu_11_5_power_context *power_context =
2054                                                                 smu->smu_power.power_context;
2055         uint32_t ppt_limit;
2056         int ret = 0;
2057
2058         if (smu->adev->pm.fw_version < 0x43f1e00)
2059                 return ret;
2060
2061         ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit);
2062         if (ret) {
2063                 dev_err(smu->adev->dev, "Get slow PPT limit failed!\n");
2064                 return ret;
2065         }
2066         /* convert from milliwatt to watt */
2067         if (current_power_limit)
2068                 *current_power_limit = ppt_limit / 1000;
2069         if (default_power_limit)
2070                 *default_power_limit = ppt_limit / 1000;
2071         if (max_power_limit)
2072                 *max_power_limit = 29;
2073
2074         ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
2075         if (ret) {
2076                 dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
2077                 return ret;
2078         }
2079         /* convert from milliwatt to watt */
2080         power_context->current_fast_ppt_limit =
2081                         power_context->default_fast_ppt_limit = ppt_limit / 1000;
2082         power_context->max_fast_ppt_limit = 30;
2083
2084         return ret;
2085 }
2086
2087 static int vangogh_get_ppt_limit(struct smu_context *smu,
2088                                                                 uint32_t *ppt_limit,
2089                                                                 enum smu_ppt_limit_type type,
2090                                                                 enum smu_ppt_limit_level level)
2091 {
2092         struct smu_11_5_power_context *power_context =
2093                                                         smu->smu_power.power_context;
2094
2095         if (!power_context)
2096                 return -EOPNOTSUPP;
2097
2098         if (type == SMU_FAST_PPT_LIMIT) {
2099                 switch (level) {
2100                 case SMU_PPT_LIMIT_MAX:
2101                         *ppt_limit = power_context->max_fast_ppt_limit;
2102                         break;
2103                 case SMU_PPT_LIMIT_CURRENT:
2104                         *ppt_limit = power_context->current_fast_ppt_limit;
2105                         break;
2106                 case SMU_PPT_LIMIT_DEFAULT:
2107                         *ppt_limit = power_context->default_fast_ppt_limit;
2108                         break;
2109                 default:
2110                         break;
2111                 }
2112         }
2113
2114         return 0;
2115 }
2116
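     /*
      * Program a new slow (default) or fast PPT limit; requires the PPT
      * feature to be enabled. Limits are given in watts and converted to
      * milliwatts for the SMU message.
      */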
2117 static int vangogh_set_power_limit(struct smu_context *smu,
2118                                    enum smu_ppt_limit_type limit_type,
2119                                    uint32_t ppt_limit)
2120 {
2121         struct smu_11_5_power_context *power_context =
2122                         smu->smu_power.power_context;
2123         int ret = 0;
2124
2125         if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
2126                 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
2127                 return -EOPNOTSUPP;
2128         }
2129
2130         switch (limit_type) {
2131         case SMU_DEFAULT_PPT_LIMIT:
2132                 ret = smu_cmn_send_smc_msg_with_param(smu,
2133                                 SMU_MSG_SetSlowPPTLimit,
2134                                 ppt_limit * 1000, /* convert from watt to milliwatt */
2135                                 NULL);
2136                 if (ret)
2137                         return ret;
2138
2139                 smu->current_power_limit = ppt_limit;
2140                 break;
2141         case SMU_FAST_PPT_LIMIT:
2142                 ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
2143                 if (ppt_limit > power_context->max_fast_ppt_limit) {
2144                         dev_err(smu->adev->dev,
2145                                 "New power limit (%d) is over the max allowed %d\n",
2146                                 ppt_limit, power_context->max_fast_ppt_limit);
2147                         return -EINVAL;
2148                 }
2149
2150                 ret = smu_cmn_send_smc_msg_with_param(smu,
2151                                 SMU_MSG_SetFastPPTLimit,
2152                                 ppt_limit * 1000, /* convert from watt to milliwatt */
2153                                 NULL);
2154                 if (ret)
2155                         return ret;
2156
2157                 power_context->current_fast_ppt_limit = ppt_limit;
2158                 break;
2159         default:
2160                 return -EINVAL;
2161         }
2162
2163         return ret;
2164 }
2165
2166 static const struct pptable_funcs vangogh_ppt_funcs = {
2167
2168         .check_fw_status = smu_v11_0_check_fw_status,
2169         .check_fw_version = smu_v11_0_check_fw_version,
2170         .init_smc_tables = vangogh_init_smc_tables,
2171         .fini_smc_tables = smu_v11_0_fini_smc_tables,
2172         .init_power = smu_v11_0_init_power,
2173         .fini_power = smu_v11_0_fini_power,
2174         .register_irq_handler = smu_v11_0_register_irq_handler,
2175         .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
2176         .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
2177         .send_smc_msg = smu_cmn_send_smc_msg,
2178         .dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable,
2179         .dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
2180         .is_dpm_running = vangogh_is_dpm_running,
2181         .read_sensor = vangogh_read_sensor,
2182         .get_enabled_mask = smu_cmn_get_enabled_mask,
2183         .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
2184         .set_watermarks_table = vangogh_set_watermarks_table,
2185         .set_driver_table_location = smu_v11_0_set_driver_table_location,
2186         .interrupt_work = smu_v11_0_interrupt_work,
2187         .get_gpu_metrics = vangogh_common_get_gpu_metrics,
2188         .od_edit_dpm_table = vangogh_od_edit_dpm_table,
2189         .print_clk_levels = vangogh_common_print_clk_levels,
2190         .set_default_dpm_table = vangogh_set_default_dpm_tables,
2191         .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
2192         .system_features_control = vangogh_system_features_control,
2193         .feature_is_enabled = smu_cmn_feature_is_enabled,
2194         .set_power_profile_mode = vangogh_set_power_profile_mode,
2195         .get_power_profile_mode = vangogh_get_power_profile_mode,
2196         .get_dpm_clock_table = vangogh_get_dpm_clock_table,
2197         .force_clk_levels = vangogh_force_clk_levels,
2198         .set_performance_level = vangogh_set_performance_level,
2199         .post_init = vangogh_post_smu_init,
2200         .mode2_reset = vangogh_mode2_reset,
2201         .gfx_off_control = smu_v11_0_gfx_off_control,
2202         .get_ppt_limit = vangogh_get_ppt_limit,
2203         .get_power_limit = vangogh_get_power_limit,
2204         .set_power_limit = vangogh_set_power_limit,
2205         .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
2206 };
2207
2208 void vangogh_set_ppt_funcs(struct smu_context *smu)
2209 {
2210         smu->ppt_funcs = &vangogh_ppt_funcs;
2211         smu->message_map = vangogh_message_map;
2212         smu->feature_map = vangogh_feature_mask_map;
2213         smu->table_map = vangogh_table_map;
2214         smu->workload_map = vangogh_workload_map;
2215         smu->is_apu = true;
2216         smu_v11_0_set_smu_mailbox_registers(smu);
2217 }