linux-2.6-block.git: drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include "atom-types.h"
28 #include "atombios.h"
29 #include "processpptables.h"
30 #include "cgs_common.h"
31 #include "smu/smu_8_0_d.h"
32 #include "smu8_fusion.h"
33 #include "smu/smu_8_0_sh_mask.h"
34 #include "smumgr.h"
35 #include "hwmgr.h"
36 #include "hardwaremanager.h"
37 #include "cz_ppsmc.h"
38 #include "smu8_hwmgr.h"
39 #include "power_state.h"
40 #include "pp_thermal.h"
41
42 #define ixSMUSVI_NB_CURRENTVID 0xD8230044
43 #define CURRENT_NB_VID_MASK 0xff000000
44 #define CURRENT_NB_VID__SHIFT 24
45 #define ixSMUSVI_GFX_CURRENTVID  0xD8230048
46 #define CURRENT_GFX_VID_MASK 0xff000000
47 #define CURRENT_GFX_VID__SHIFT 24
48
49 static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;
50
51 static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
52 {
53         if (smu8_magic != hw_ps->magic)
54                 return NULL;
55
56         return (struct smu8_power_state *)hw_ps;
57 }
58
59 static const struct smu8_power_state *cast_const_smu8_power_state(
60                                 const struct pp_hw_power_state *hw_ps)
61 {
62         if (smu8_magic != hw_ps->magic)
63                 return NULL;
64
65         return (struct smu8_power_state *)hw_ps;
66 }
67
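/*
 * The smu8_get_*_level() helpers below translate a clock frequency into an
 * index of the matching clock/voltage dependency table.  For the *SoftMin/
 * *HardMin messages the first (lowest) level whose clock is >= the request
 * is returned; for the *SoftMax/*HardMax messages the highest level whose
 * clock is <= the request is returned.
 */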
68 static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
69                                         uint32_t clock, uint32_t msg)
70 {
71         int i = 0;
72         struct phm_vce_clock_voltage_dependency_table *ptable =
73                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
74
75         switch (msg) {
76         case PPSMC_MSG_SetEclkSoftMin:
77         case PPSMC_MSG_SetEclkHardMin:
78                 for (i = 0; i < (int)ptable->count; i++) {
79                         if (clock <= ptable->entries[i].ecclk)
80                                 break;
81                 }
82                 break;
83
84         case PPSMC_MSG_SetEclkSoftMax:
85         case PPSMC_MSG_SetEclkHardMax:
86                 for (i = ptable->count - 1; i >= 0; i--) {
87                         if (clock >= ptable->entries[i].ecclk)
88                                 break;
89                 }
90                 break;
91
92         default:
93                 break;
94         }
95
96         return i;
97 }
98
99 static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
100                                 uint32_t clock, uint32_t msg)
101 {
102         int i = 0;
103         struct phm_clock_voltage_dependency_table *table =
104                                 hwmgr->dyn_state.vddc_dependency_on_sclk;
105
106         switch (msg) {
107         case PPSMC_MSG_SetSclkSoftMin:
108         case PPSMC_MSG_SetSclkHardMin:
109                 for (i = 0; i < (int)table->count; i++) {
110                         if (clock <= table->entries[i].clk)
111                                 break;
112                 }
113                 break;
114
115         case PPSMC_MSG_SetSclkSoftMax:
116         case PPSMC_MSG_SetSclkHardMax:
117                 for (i = table->count - 1; i >= 0; i--) {
118                         if (clock >= table->entries[i].clk)
119                                 break;
120                 }
121                 break;
122
123         default:
124                 break;
125         }
126         return i;
127 }
128
129 static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
130                                         uint32_t clock, uint32_t msg)
131 {
132         int i = 0;
133         struct phm_uvd_clock_voltage_dependency_table *ptable =
134                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
135
136         switch (msg) {
137         case PPSMC_MSG_SetUvdSoftMin:
138         case PPSMC_MSG_SetUvdHardMin:
139                 for (i = 0; i < (int)ptable->count; i++) {
140                         if (clock <= ptable->entries[i].vclk)
141                                 break;
142                 }
143                 break;
144
145         case PPSMC_MSG_SetUvdSoftMax:
146         case PPSMC_MSG_SetUvdHardMax:
147                 for (i = ptable->count - 1; i >= 0; i--) {
148                         if (clock >= ptable->entries[i].vclk)
149                                 break;
150                 }
151                 break;
152
153         default:
154                 break;
155         }
156
157         return i;
158 }
159
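/*
 * Return the cached maximum SCLK level.  On first use the value is queried
 * from the SMU (PPSMC_MSG_GetMaxSclkLevel) and stored as "reported level + 1".
 */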
160 static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
161 {
162         struct smu8_hwmgr *data = hwmgr->backend;
163
164         if (data->max_sclk_level == 0) {
165                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
166                 data->max_sclk_level = smum_get_argument(hwmgr) + 1;
167         }
168
169         return data->max_sclk_level;
170 }
171
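/*
 * Set up the backend's default DPM tuning values and the platform capability
 * bits used elsewhere in the driver.  The UVD/VCE power gating caps are only
 * set when the corresponding AMD_PG_SUPPORT_* flags are present in pg_flags.
 */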
172 static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
173 {
174         struct smu8_hwmgr *data = hwmgr->backend;
175         struct amdgpu_device *adev = hwmgr->adev;
176
177         data->gfx_ramp_step = 256*25/100;
178         data->gfx_ramp_delay = 1; /* by default, we delay 1us */
179
180         data->mgcg_cgtt_local0 = 0x00000000;
181         data->mgcg_cgtt_local1 = 0x00000000;
182         data->clock_slow_down_freq = 25000;
183         data->skip_clock_slow_down = 1;
184         data->enable_nb_ps_policy = 1; /* enabled; this was kept disabled until UNB was ready */
185         data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
186         data->voting_rights_clients = 0x00C00033;
187         data->static_screen_threshold = 8;
188         data->ddi_power_gating_disabled = 0;
189         data->bapm_enabled = 1;
190         data->voltage_drop_threshold = 0;
191         data->gfx_power_gating_threshold = 500;
192         data->vce_slow_sclk_threshold = 20000;
193         data->dce_slow_sclk_threshold = 30000;
194         data->disable_driver_thermal_policy = 1;
195         data->disable_nb_ps3_in_battery = 0;
196
197         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
198                                                         PHM_PlatformCaps_ABM);
199
200         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
201                                     PHM_PlatformCaps_NonABMSupportInPPLib);
202
203         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
204                                         PHM_PlatformCaps_DynamicM3Arbiter);
205
206         data->override_dynamic_mgpg = 1;
207
208         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
209                                   PHM_PlatformCaps_DynamicPatchPowerState);
210
211         data->thermal_auto_throttling_treshold = 0;
212         data->tdr_clock = 0;
213         data->disable_gfx_power_gating_in_uvd = 0;
214
215         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
216                                         PHM_PlatformCaps_DynamicUVDState);
217
218         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
219                         PHM_PlatformCaps_UVDDPM);
220         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
221                         PHM_PlatformCaps_VCEDPM);
222
223         data->cc6_settings.cpu_cc6_disable = false;
224         data->cc6_settings.cpu_pstate_disable = false;
225         data->cc6_settings.nb_pstate_switch_disable = false;
226         data->cc6_settings.cpu_pstate_separation_time = 0;
227
228         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
229                                    PHM_PlatformCaps_DisableVoltageIsland);
230
231         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
232                       PHM_PlatformCaps_UVDPowerGating);
233         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
234                       PHM_PlatformCaps_VCEPowerGating);
235
236         if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
237                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
238                               PHM_PlatformCaps_UVDPowerGating);
239         if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
240                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
241                               PHM_PlatformCaps_VCEPowerGating);
242
243
244         return 0;
245 }
246
247 /* convert from an 8-bit VID to the real voltage in mV*4 */
248 static uint32_t smu8_convert_8Bit_index_to_voltage(
249                         struct pp_hwmgr *hwmgr, uint16_t voltage)
250 {
251         return 6200 - (voltage * 25);
252 }
253
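/*
 * Derive the maximum clock/voltage limits: sclk and vddc come from the last
 * entry of the vddc-on-sclk dependency table, mclk from NB P-state 0.
 */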
254 static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
255                         struct phm_clock_and_voltage_limits *table)
256 {
257         struct smu8_hwmgr *data = hwmgr->backend;
258         struct smu8_sys_info *sys_info = &data->sys_info;
259         struct phm_clock_voltage_dependency_table *dep_table =
260                                 hwmgr->dyn_state.vddc_dependency_on_sclk;
261
262         if (dep_table->count > 0) {
263                 table->sclk = dep_table->entries[dep_table->count-1].clk;
264                 table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
265                    (uint16_t)dep_table->entries[dep_table->count-1].v);
266         }
267         table->mclk = sys_info->nbp_memory_clock[0];
268         return 0;
269 }
270
271 static int smu8_init_dynamic_state_adjustment_rule_settings(
272                         struct pp_hwmgr *hwmgr,
273                         ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
274 {
275         struct phm_clock_voltage_dependency_table *table_clk_vlt;
276
277         table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7),
278                                 GFP_KERNEL);
279
280         if (NULL == table_clk_vlt) {
281                 pr_err("Cannot allocate memory!\n");
282                 return -ENOMEM;
283         }
284
285         table_clk_vlt->count = 8;
286         table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
287         table_clk_vlt->entries[0].v = 0;
288         table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
289         table_clk_vlt->entries[1].v = 1;
290         table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
291         table_clk_vlt->entries[2].v = 2;
292         table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
293         table_clk_vlt->entries[3].v = 3;
294         table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
295         table_clk_vlt->entries[4].v = 4;
296         table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
297         table_clk_vlt->entries[5].v = 5;
298         table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
299         table_clk_vlt->entries[6].v = 6;
300         table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
301         table_clk_vlt->entries[7].v = 7;
302         hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
303
304         return 0;
305 }
306
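/*
 * Parse the ATOM IntegratedSystemInfo table (content revision 9 only) and
 * cache bootup clocks, NB P-state clocks/voltages, display clocks and the
 * HTC thermal limits in the backend, then build the derived limit tables.
 */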
307 static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
308 {
309         struct smu8_hwmgr *data = hwmgr->backend;
310         ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
311         uint32_t i;
312         int result = 0;
313         uint8_t frev, crev;
314         uint16_t size;
315
316         info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
317                         GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
318                         &size, &frev, &crev);
319
320         if (info == NULL) {
321                 pr_err("Could not retrieve the Integrated System Info Table!\n");
322                 return -EINVAL;
323         }
324
325         if (crev != 9) {
326                 pr_err("Unsupported IGP table: %d %d\n", frev, crev);
327                 return -EINVAL;
328         }
329
330         data->sys_info.bootup_uma_clock =
331                                    le32_to_cpu(info->ulBootUpUMAClock);
332
333         data->sys_info.bootup_engine_clock =
334                                 le32_to_cpu(info->ulBootUpEngineClock);
335
336         data->sys_info.dentist_vco_freq =
337                                    le32_to_cpu(info->ulDentistVCOFreq);
338
339         data->sys_info.system_config =
340                                      le32_to_cpu(info->ulSystemConfig);
341
342         data->sys_info.bootup_nb_voltage_index =
343                                   le16_to_cpu(info->usBootUpNBVoltage);
344
345         data->sys_info.htc_hyst_lmt =
346                         (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
347
348         data->sys_info.htc_tmp_lmt =
349                         (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
350
351         if (data->sys_info.htc_tmp_lmt <=
352                         data->sys_info.htc_hyst_lmt) {
353                 pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
354                 return -EINVAL;
355         }
356
357         data->sys_info.nb_dpm_enable =
358                                 data->enable_nb_ps_policy &&
359                                 (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
360
361         for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
362                 if (i < SMU8_NUM_NBPMEMORYCLOCK) {
363                         data->sys_info.nbp_memory_clock[i] =
364                           le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
365                 }
366                 data->sys_info.nbp_n_clock[i] =
367                             le32_to_cpu(info->ulNbpStateNClkFreq[i]);
368         }
369
370         for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
371                 data->sys_info.display_clock[i] =
372                                         le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
373         }
374
375         /* Only 4 NB P-state levels are used here; make sure we do not exceed them */
376         for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
377                 data->sys_info.nbp_voltage_index[i] =
378                              le16_to_cpu(info->usNBPStateVoltage[i]);
379         }
380
381         if (!data->sys_info.nb_dpm_enable) {
382                 for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
383                         if (i < SMU8_NUM_NBPMEMORYCLOCK) {
384                                 data->sys_info.nbp_memory_clock[i] =
385                                     data->sys_info.nbp_memory_clock[0];
386                         }
387                         data->sys_info.nbp_n_clock[i] =
388                                     data->sys_info.nbp_n_clock[0];
389                         data->sys_info.nbp_voltage_index[i] =
390                                     data->sys_info.nbp_voltage_index[0];
391                 }
392         }
393
394         if (le32_to_cpu(info->ulGPUCapInfo) &
395                 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
396                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
397                                     PHM_PlatformCaps_EnableDFSBypass);
398         }
399
400         data->sys_info.uma_channel_number = info->ucUMAChannelNumber;
401
402         smu8_construct_max_power_limits_table(hwmgr,
403                                     &hwmgr->dyn_state.max_clock_voltage_on_ac);
404
405         smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
406                                     &info->sDISPCLK_Voltage[0]);
407
408         return result;
409 }
410
411 static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
412 {
413         struct smu8_hwmgr *data = hwmgr->backend;
414
415         data->boot_power_level.engineClock =
416                                 data->sys_info.bootup_engine_clock;
417
418         data->boot_power_level.vddcIndex =
419                         (uint8_t)data->sys_info.bootup_nb_voltage_index;
420
421         data->boot_power_level.dsDividerIndex = 0;
422         data->boot_power_level.ssDividerIndex = 0;
423         data->boot_power_level.allowGnbSlow = 1;
424         data->boot_power_level.forceNBPstate = 0;
425         data->boot_power_level.hysteresis_up = 0;
426         data->boot_power_level.numSIMDToPowerDown = 0;
427         data->boot_power_level.display_wm = 0;
428         data->boot_power_level.vce_wm = 0;
429
430         return 0;
431 }
432
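/*
 * Download the SMU8_Fusion_ClkTable from the SMU, patch the SCLK/ACLK/VCLK/
 * DCLK/ECLK breakdown tables from the powerplay dependency tables (voltage,
 * frequency and PLL post divider per level) and upload the result back to
 * the SMU.  Only done when a pptable upload is required.
 */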
433 static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
434 {
435         struct SMU8_Fusion_ClkTable *clock_table;
436         int ret;
437         uint32_t i;
438         void *table = NULL;
439         pp_atomctrl_clock_dividers_kong dividers;
440
441         struct phm_clock_voltage_dependency_table *vddc_table =
442                 hwmgr->dyn_state.vddc_dependency_on_sclk;
443         struct phm_clock_voltage_dependency_table *vdd_gfx_table =
444                 hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
445         struct phm_acp_clock_voltage_dependency_table *acp_table =
446                 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
447         struct phm_uvd_clock_voltage_dependency_table *uvd_table =
448                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
449         struct phm_vce_clock_voltage_dependency_table *vce_table =
450                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
451
452         if (!hwmgr->need_pp_table_upload)
453                 return 0;
454
455         ret = smum_download_powerplay_table(hwmgr, &table);
456
457         PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
458                             "Failed to get clock table from SMU!", return -EINVAL;);
459
460         clock_table = (struct SMU8_Fusion_ClkTable *)table;
461
462         /* patch clock table */
463         PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
464                             "Dependency table entry exceeds max limit!", return -EINVAL;);
465         PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
466                             "Dependency table entry exceeds max limit!", return -EINVAL;);
467         PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
468                             "Dependency table entry exceeds max limit!", return -EINVAL;);
469         PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
470                             "Dependency table entry exceeds max limit!", return -EINVAL;);
471         PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
472                             "Dependency table entry exceeds max limit!", return -EINVAL;);
473
474         for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {
475
476                 /* vddc_sclk */
477                 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
478                         (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
479                 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
480                         (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
481
482                 atomctrl_get_engine_pll_dividers_kong(hwmgr,
483                                                       clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
484                                                       &dividers);
485
486                 clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
487                         (uint8_t)dividers.pll_post_divider;
488
489                 /* vddgfx_sclk */
490                 clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
491                         (i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;
492
493                 /* acp breakdown */
494                 clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
495                         (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
496                 clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
497                         (i < acp_table->count) ? acp_table->entries[i].acpclk : 0;
498
499                 atomctrl_get_engine_pll_dividers_kong(hwmgr,
500                                                       clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
501                                                       &dividers);
502
503                 clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
504                         (uint8_t)dividers.pll_post_divider;
505
506
507                 /* uvd breakdown */
508                 clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
509                         (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
510                 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
511                         (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
512
513                 atomctrl_get_engine_pll_dividers_kong(hwmgr,
514                                                       clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
515                                                       &dividers);
516
517                 clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
518                         (uint8_t)dividers.pll_post_divider;
519
520                 clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
521                         (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
522                 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
523                         (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
524
525                 atomctrl_get_engine_pll_dividers_kong(hwmgr,
526                                                       clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
527                                                       &dividers);
528
529                 clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
530                         (uint8_t)dividers.pll_post_divider;
531
532                 /* vce breakdown */
533                 clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
534                         (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
535                 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
536                         (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
537
538
539                 atomctrl_get_engine_pll_dividers_kong(hwmgr,
540                                                       clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
541                                                       &dividers);
542
543                 clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
544                         (uint8_t)dividers.pll_post_divider;
545
546         }
547         ret = smum_upload_powerplay_table(hwmgr);
548
549         return ret;
550 }
551
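/*
 * The smu8_init_*_limit() helpers below seed the soft/hard DPM limits for
 * SCLK, UVD, VCE and ACP from the dependency tables and the maximum level
 * reported by the SMU.
 */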
552 static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
553 {
554         struct smu8_hwmgr *data = hwmgr->backend;
555         struct phm_clock_voltage_dependency_table *table =
556                                         hwmgr->dyn_state.vddc_dependency_on_sclk;
557         unsigned long clock = 0, level;
558
559         if (NULL == table || table->count <= 0)
560                 return -EINVAL;
561
562         data->sclk_dpm.soft_min_clk = table->entries[0].clk;
563         data->sclk_dpm.hard_min_clk = table->entries[0].clk;
564
565         level = smu8_get_max_sclk_level(hwmgr) - 1;
566
567         if (level < table->count)
568                 clock = table->entries[level].clk;
569         else
570                 clock = table->entries[table->count - 1].clk;
571
572         data->sclk_dpm.soft_max_clk = clock;
573         data->sclk_dpm.hard_max_clk = clock;
574
575         return 0;
576 }
577
578 static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
579 {
580         struct smu8_hwmgr *data = hwmgr->backend;
581         struct phm_uvd_clock_voltage_dependency_table *table =
582                                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
583         unsigned long clock = 0, level;
584
585         if (NULL == table || table->count <= 0)
586                 return -EINVAL;
587
588         data->uvd_dpm.soft_min_clk = 0;
589         data->uvd_dpm.hard_min_clk = 0;
590
591         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
592         level = smum_get_argument(hwmgr);
593
594         if (level < table->count)
595                 clock = table->entries[level].vclk;
596         else
597                 clock = table->entries[table->count - 1].vclk;
598
599         data->uvd_dpm.soft_max_clk = clock;
600         data->uvd_dpm.hard_max_clk = clock;
601
602         return 0;
603 }
604
605 static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
606 {
607         struct smu8_hwmgr *data = hwmgr->backend;
608         struct phm_vce_clock_voltage_dependency_table *table =
609                                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
610         unsigned long clock = 0, level;
611
612         if (NULL == table || table->count <= 0)
613                 return -EINVAL;
614
615         data->vce_dpm.soft_min_clk = 0;
616         data->vce_dpm.hard_min_clk = 0;
617
618         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
619         level = smum_get_argument(hwmgr);
620
621         if (level < table->count)
622                 clock = table->entries[level].ecclk;
623         else
624                 clock = table->entries[table->count - 1].ecclk;
625
626         data->vce_dpm.soft_max_clk = clock;
627         data->vce_dpm.hard_max_clk = clock;
628
629         return 0;
630 }
631
632 static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
633 {
634         struct smu8_hwmgr *data = hwmgr->backend;
635         struct phm_acp_clock_voltage_dependency_table *table =
636                                 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
637         unsigned long clock = 0, level;
638
639         if (NULL == table || table->count <= 0)
640                 return -EINVAL;
641
642         data->acp_dpm.soft_min_clk = 0;
643         data->acp_dpm.hard_min_clk = 0;
644
645         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
646         level = smum_get_argument(hwmgr);
647
648         if (level < table->count)
649                 clock = table->entries[level].acpclk;
650         else
651                 clock = table->entries[table->count - 1].acpclk;
652
653         data->acp_dpm.soft_max_clk = clock;
654         data->acp_dpm.hard_max_clk = clock;
655         return 0;
656 }
657
658 static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
659 {
660         struct smu8_hwmgr *data = hwmgr->backend;
661
662         data->uvd_power_gated = false;
663         data->vce_power_gated = false;
664         data->samu_power_gated = false;
665 #ifdef CONFIG_DRM_AMD_ACP
666         data->acp_power_gated = false;
667 #else
668         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
669         data->acp_power_gated = true;
670 #endif
671
672 }
673
674 static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
675 {
676         struct smu8_hwmgr *data = hwmgr->backend;
677
678         data->low_sclk_interrupt_threshold = 0;
679 }
680
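/*
 * Refresh the SCLK limits from the dependency table and the current display
 * configuration.  When the Stable P-State cap is set, a 75% floor derived
 * from the AC maximum clock limits is applied to the soft minimum (and the
 * soft maximum is pinned to the same value).  Changed limits are sent to the
 * SMU as level indices.
 */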
681 static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
682 {
683         struct smu8_hwmgr *data = hwmgr->backend;
684         struct phm_clock_voltage_dependency_table *table =
685                                         hwmgr->dyn_state.vddc_dependency_on_sclk;
686
687         unsigned long clock = 0;
688         unsigned long level;
689         unsigned long stable_pstate_sclk;
690         unsigned long percentage;
691
692         data->sclk_dpm.soft_min_clk = table->entries[0].clk;
693         level = smu8_get_max_sclk_level(hwmgr) - 1;
694
695         if (level < table->count)
696                 data->sclk_dpm.soft_max_clk  = table->entries[level].clk;
697         else
698                 data->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;
699
700         clock = hwmgr->display_config->min_core_set_clock;
701         if (clock == 0)
702                 pr_debug("min_core_set_clock not set\n");
703
704         if (data->sclk_dpm.hard_min_clk != clock) {
705                 data->sclk_dpm.hard_min_clk = clock;
706
707                 smum_send_msg_to_smc_with_parameter(hwmgr,
708                                                 PPSMC_MSG_SetSclkHardMin,
709                                                  smu8_get_sclk_level(hwmgr,
710                                         data->sclk_dpm.hard_min_clk,
711                                              PPSMC_MSG_SetSclkHardMin));
712         }
713
714         clock = data->sclk_dpm.soft_min_clk;
715
716         /* update minimum clocks for Stable P-State feature */
717         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
718                                      PHM_PlatformCaps_StablePState)) {
719                 percentage = 75;
720                 /* Sclk: calculate the sclk value based on the percentage and find the FLOOR sclk from the VddcDependencyOnSCLK table */
721                 stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
722                                         percentage) / 100;
723
724                 if (clock < stable_pstate_sclk)
725                         clock = stable_pstate_sclk;
726         }
727
728         if (data->sclk_dpm.soft_min_clk != clock) {
729                 data->sclk_dpm.soft_min_clk = clock;
730                 smum_send_msg_to_smc_with_parameter(hwmgr,
731                                                 PPSMC_MSG_SetSclkSoftMin,
732                                                 smu8_get_sclk_level(hwmgr,
733                                         data->sclk_dpm.soft_min_clk,
734                                              PPSMC_MSG_SetSclkSoftMin));
735         }
736
737         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
738                                     PHM_PlatformCaps_StablePState) &&
739                          data->sclk_dpm.soft_max_clk != clock) {
740                 data->sclk_dpm.soft_max_clk = clock;
741                 smum_send_msg_to_smc_with_parameter(hwmgr,
742                                                 PPSMC_MSG_SetSclkSoftMax,
743                                                 smu8_get_sclk_level(hwmgr,
744                                         data->sclk_dpm.soft_max_clk,
745                                         PPSMC_MSG_SetSclkSoftMax));
746         }
747
748         return 0;
749 }
750
751 static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
752 {
753         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
754                                 PHM_PlatformCaps_SclkDeepSleep)) {
755                 uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
756                 if (clks == 0)
757                         clks = SMU8_MIN_DEEP_SLEEP_SCLK;
758
759                 PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
760
761                 smum_send_msg_to_smc_with_parameter(hwmgr,
762                                 PPSMC_MSG_SetMinDeepSleepSclk,
763                                 clks);
764         }
765
766         return 0;
767 }
768
769 static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
770 {
771         struct smu8_hwmgr *data =
772                                   hwmgr->backend;
773
774         smum_send_msg_to_smc_with_parameter(hwmgr,
775                                         PPSMC_MSG_SetWatermarkFrequency,
776                                         data->sclk_dpm.soft_max_clk);
777
778         return 0;
779 }
780
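/*
 * Enable or disable the low-memory NB P-state through the SMU.  The "lock"
 * argument is forwarded as the message parameter.  This is a no-op while NB
 * DPM has not been enabled.
 */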
781 static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
782 {
783         struct smu8_hwmgr *hw_data = hwmgr->backend;
784
785         if (hw_data->is_nb_dpm_enabled) {
786                 if (enable) {
787                         PP_DBG_LOG("enable Low Memory PState.\n");
788
789                         return smum_send_msg_to_smc_with_parameter(hwmgr,
790                                                 PPSMC_MSG_EnableLowMemoryPstate,
791                                                 (lock ? 1 : 0));
792                 } else {
793                         PP_DBG_LOG("disable Low Memory PState.\n");
794
795                         return smum_send_msg_to_smc_with_parameter(hwmgr,
796                                                 PPSMC_MSG_DisableLowMemoryPstate,
797                                                 (lock ? 1 : 0));
798                 }
799         }
800
801         return 0;
802 }
803
804 static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
805 {
806         int ret = 0;
807
808         struct smu8_hwmgr *data = hwmgr->backend;
809         unsigned long dpm_features = 0;
810
811         if (data->is_nb_dpm_enabled) {
812                 smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
813                 dpm_features |= NB_DPM_MASK;
814                 ret = smum_send_msg_to_smc_with_parameter(
815                                                           hwmgr,
816                                                           PPSMC_MSG_DisableAllSmuFeatures,
817                                                           dpm_features);
818                 if (ret == 0)
819                         data->is_nb_dpm_enabled = false;
820         }
821
822         return ret;
823 }
824
825 static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
826 {
827         int ret = 0;
828
829         struct smu8_hwmgr *data = hwmgr->backend;
830         unsigned long dpm_features = 0;
831
832         if (!data->is_nb_dpm_enabled) {
833                 PP_DBG_LOG("enabling ALL SMU features.\n");
834                 dpm_features |= NB_DPM_MASK;
835                 ret = smum_send_msg_to_smc_with_parameter(
836                                                           hwmgr,
837                                                           PPSMC_MSG_EnableAllSmuFeatures,
838                                                           dpm_features);
839                 if (ret == 0)
840                         data->is_nb_dpm_enabled = true;
841         }
842
843         return ret;
844 }
845
846 static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
847 {
848         bool disable_switch;
849         bool enable_low_mem_state;
850         struct smu8_hwmgr *hw_data = hwmgr->backend;
851         const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
852         const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);
853
854         if (hw_data->sys_info.nb_dpm_enable) {
855                 disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
856                 enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
857
858                 if (pnew_state->action == FORCE_HIGH)
859                         smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
860                 else if (pnew_state->action == CANCEL_FORCE_HIGH)
861                         smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
862                 else
863                         smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
864         }
865         return 0;
866 }
867
868 static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
869 {
870         int ret = 0;
871
872         smu8_update_sclk_limit(hwmgr);
873         smu8_set_deep_sleep_sclk_threshold(hwmgr);
874         smu8_set_watermark_threshold(hwmgr);
875         ret = smu8_enable_nb_dpm(hwmgr);
876         if (ret)
877                 return ret;
878         smu8_update_low_mem_pstate(hwmgr, input);
879
880         return 0;
881 }
882
883
884 static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
885 {
886         int ret;
887
888         ret = smu8_upload_pptable_to_smu(hwmgr);
889         if (ret)
890                 return ret;
891         ret = smu8_init_sclk_limit(hwmgr);
892         if (ret)
893                 return ret;
894         ret = smu8_init_uvd_limit(hwmgr);
895         if (ret)
896                 return ret;
897         ret = smu8_init_vce_limit(hwmgr);
898         if (ret)
899                 return ret;
900         ret = smu8_init_acp_limit(hwmgr);
901         if (ret)
902                 return ret;
903
904         smu8_init_power_gate_state(hwmgr);
905         smu8_init_sclk_threshold(hwmgr);
906
907         return 0;
908 }
909
910 static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
911 {
912         struct smu8_hwmgr *hw_data = hwmgr->backend;
913
914         hw_data->disp_clk_bypass_pending = false;
915         hw_data->disp_clk_bypass = false;
916 }
917
918 static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
919 {
920         struct smu8_hwmgr *hw_data = hwmgr->backend;
921
922         hw_data->is_nb_dpm_enabled = false;
923 }
924
925 static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
926 {
927         struct smu8_hwmgr *hw_data = hwmgr->backend;
928
929         hw_data->cc6_settings.cc6_setting_changed = false;
930         hw_data->cc6_settings.cpu_pstate_separation_time = 0;
931         hw_data->cc6_settings.cpu_cc6_disable = false;
932         hw_data->cc6_settings.cpu_pstate_disable = false;
933 }
934
935 static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
936 {
937         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
938                                 ixCG_FREQ_TRAN_VOTING_0,
939                                 SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
940 }
941
942 static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
943 {
944         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
945                                 ixCG_FREQ_TRAN_VOTING_0, 0);
946 }
947
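/*
 * smu8_start_dpm()/smu8_stop_dpm() toggle SCLK DPM by enabling or disabling
 * the SCLK feature mask in the SMU and mirroring the state in dpm_flags.
 */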
948 static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
949 {
950         struct smu8_hwmgr *data = hwmgr->backend;
951
952         data->dpm_flags |= DPMFlags_SCLK_Enabled;
953
954         return smum_send_msg_to_smc_with_parameter(hwmgr,
955                                 PPSMC_MSG_EnableAllSmuFeatures,
956                                 SCLK_DPM_MASK);
957 }
958
959 static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
960 {
961         int ret = 0;
962         struct smu8_hwmgr *data = hwmgr->backend;
963         unsigned long dpm_features = 0;
964
965         if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
966                 dpm_features |= SCLK_DPM_MASK;
967                 data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
968                 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
969                                         PPSMC_MSG_DisableAllSmuFeatures,
970                                         dpm_features);
971         }
972         return ret;
973 }
974
975 static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
976 {
977         struct smu8_hwmgr *data = hwmgr->backend;
978
979         data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
980         data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;
981
982         smum_send_msg_to_smc_with_parameter(hwmgr,
983                                 PPSMC_MSG_SetSclkSoftMin,
984                                 smu8_get_sclk_level(hwmgr,
985                                 data->sclk_dpm.soft_min_clk,
986                                 PPSMC_MSG_SetSclkSoftMin));
987
988         smum_send_msg_to_smc_with_parameter(hwmgr,
989                                 PPSMC_MSG_SetSclkSoftMax,
990                                 smu8_get_sclk_level(hwmgr,
991                                 data->sclk_dpm.soft_max_clk,
992                                 PPSMC_MSG_SetSclkSoftMax));
993
994         return 0;
995 }
996
997 static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
998 {
999         struct smu8_hwmgr *data = hwmgr->backend;
1000
1001         data->acp_boot_level = 0xff;
1002 }
1003
1004 static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1005 {
1006         smu8_program_voting_clients(hwmgr);
1007         if (smu8_start_dpm(hwmgr))
1008                 return -EINVAL;
1009         smu8_program_bootup_state(hwmgr);
1010         smu8_reset_acp_boot_level(hwmgr);
1011
1012         return 0;
1013 }
1014
1015 static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1016 {
1017         smu8_disable_nb_dpm(hwmgr);
1018
1019         smu8_clear_voting_clients(hwmgr);
1020         if (smu8_stop_dpm(hwmgr))
1021                 return -EINVAL;
1022
1023         return 0;
1024 }
1025
1026 static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
1027 {
1028         smu8_disable_dpm_tasks(hwmgr);
1029         smu8_power_up_display_clock_sys_pll(hwmgr);
1030         smu8_clear_nb_dpm_flag(hwmgr);
1031         smu8_reset_cc6_data(hwmgr);
1032         return 0;
1033 }
1034
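/*
 * Adjust the requested power state before it is applied: choose a memory
 * clock from the display configuration (or the NB P-state table / AC limits)
 * and decide whether the high NB P-state must be forced, a previous force
 * cancelled, or nothing done.
 */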
1035 static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
1036                                 struct pp_power_state  *prequest_ps,
1037                         const struct pp_power_state *pcurrent_ps)
1038 {
1039         struct smu8_power_state *smu8_ps =
1040                                 cast_smu8_power_state(&prequest_ps->hardware);
1041
1042         const struct smu8_power_state *smu8_current_ps =
1043                                 cast_const_smu8_power_state(&pcurrent_ps->hardware);
1044
1045         struct smu8_hwmgr *data = hwmgr->backend;
1046         struct PP_Clocks clocks = {0, 0, 0, 0};
1047         bool force_high;
1048
1049         smu8_ps->need_dfs_bypass = true;
1050
1051         data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
1052
1053         clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ?
1054                                 hwmgr->display_config->min_mem_set_clock :
1055                                 data->sys_info.nbp_memory_clock[1];
1056
1057
1058         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
1059                 clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
1060
1061         force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
1062                         || (hwmgr->display_config->num_display >= 3);
1063
1064         smu8_ps->action = smu8_current_ps->action;
1065
1066         if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1067                 smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
1068         else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
1069                 smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
1070         else if (!force_high && (smu8_ps->action == FORCE_HIGH))
1071                 smu8_ps->action = CANCEL_FORCE_HIGH;
1072         else if (force_high && (smu8_ps->action != FORCE_HIGH))
1073                 smu8_ps->action = FORCE_HIGH;
1074         else
1075                 smu8_ps->action = DO_NOTHING;
1076
1077         return 0;
1078 }
1079
1080 static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
1081 {
1082         int result = 0;
1083         struct smu8_hwmgr *data;
1084
1085         data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL);
1086         if (data == NULL)
1087                 return -ENOMEM;
1088
1089         hwmgr->backend = data;
1090
1091         result = smu8_initialize_dpm_defaults(hwmgr);
1092         if (result != 0) {
1093                 pr_err("smu8_initialize_dpm_defaults failed\n");
1094                 return result;
1095         }
1096
1097         result = smu8_get_system_info_data(hwmgr);
1098         if (result != 0) {
1099                 pr_err("smu8_get_system_info_data failed\n");
1100                 return result;
1101         }
1102
1103         smu8_construct_boot_state(hwmgr);
1104
1105         hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =  SMU8_MAX_HARDWARE_POWERLEVELS;
1106
1107         return result;
1108 }
1109
1110 static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
1111 {
1112         if (hwmgr != NULL) {
1113                 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
1114                 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
1115
1116                 kfree(hwmgr->backend);
1117                 hwmgr->backend = NULL;
1118         }
1119         return 0;
1120 }
1121
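/*
 * The three helpers below clamp the SCLK soft min/max range to force the
 * highest level, restore the full range (unforce), or force the lowest level.
 */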
1122 static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
1123 {
1124         struct smu8_hwmgr *data = hwmgr->backend;
1125
1126         smum_send_msg_to_smc_with_parameter(hwmgr,
1127                                         PPSMC_MSG_SetSclkSoftMin,
1128                                         smu8_get_sclk_level(hwmgr,
1129                                         data->sclk_dpm.soft_max_clk,
1130                                         PPSMC_MSG_SetSclkSoftMin));
1131
1132         smum_send_msg_to_smc_with_parameter(hwmgr,
1133                                 PPSMC_MSG_SetSclkSoftMax,
1134                                 smu8_get_sclk_level(hwmgr,
1135                                 data->sclk_dpm.soft_max_clk,
1136                                 PPSMC_MSG_SetSclkSoftMax));
1137
1138         return 0;
1139 }
1140
1141 static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1142 {
1143         struct smu8_hwmgr *data = hwmgr->backend;
1144         struct phm_clock_voltage_dependency_table *table =
1145                                 hwmgr->dyn_state.vddc_dependency_on_sclk;
1146         unsigned long clock = 0, level;
1147
1148         if (NULL == table || table->count <= 0)
1149                 return -EINVAL;
1150
1151         data->sclk_dpm.soft_min_clk = table->entries[0].clk;
1152         data->sclk_dpm.hard_min_clk = table->entries[0].clk;
1153         hwmgr->pstate_sclk = table->entries[0].clk;
1154         hwmgr->pstate_mclk = 0;
1155
1156         level = smu8_get_max_sclk_level(hwmgr) - 1;
1157
1158         if (level < table->count)
1159                 clock = table->entries[level].clk;
1160         else
1161                 clock = table->entries[table->count - 1].clk;
1162
1163         data->sclk_dpm.soft_max_clk = clock;
1164         data->sclk_dpm.hard_max_clk = clock;
1165
1166         smum_send_msg_to_smc_with_parameter(hwmgr,
1167                                 PPSMC_MSG_SetSclkSoftMin,
1168                                 smu8_get_sclk_level(hwmgr,
1169                                 data->sclk_dpm.soft_min_clk,
1170                                 PPSMC_MSG_SetSclkSoftMin));
1171
1172         smum_send_msg_to_smc_with_parameter(hwmgr,
1173                                 PPSMC_MSG_SetSclkSoftMax,
1174                                 smu8_get_sclk_level(hwmgr,
1175                                 data->sclk_dpm.soft_max_clk,
1176                                 PPSMC_MSG_SetSclkSoftMax));
1177
1178         return 0;
1179 }
1180
1181 static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1182 {
1183         struct smu8_hwmgr *data = hwmgr->backend;
1184
1185         smum_send_msg_to_smc_with_parameter(hwmgr,
1186                         PPSMC_MSG_SetSclkSoftMax,
1187                         smu8_get_sclk_level(hwmgr,
1188                         data->sclk_dpm.soft_min_clk,
1189                         PPSMC_MSG_SetSclkSoftMax));
1190
1191         smum_send_msg_to_smc_with_parameter(hwmgr,
1192                                 PPSMC_MSG_SetSclkSoftMin,
1193                                 smu8_get_sclk_level(hwmgr,
1194                                 data->sclk_dpm.soft_min_clk,
1195                                 PPSMC_MSG_SetSclkSoftMin));
1196
1197         return 0;
1198 }
1199
1200 static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1201                                 enum amd_dpm_forced_level level)
1202 {
1203         int ret = 0;
1204
1205         switch (level) {
1206         case AMD_DPM_FORCED_LEVEL_HIGH:
1207         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1208                 ret = smu8_phm_force_dpm_highest(hwmgr);
1209                 break;
1210         case AMD_DPM_FORCED_LEVEL_LOW:
1211         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1212         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1213                 ret = smu8_phm_force_dpm_lowest(hwmgr);
1214                 break;
1215         case AMD_DPM_FORCED_LEVEL_AUTO:
1216                 ret = smu8_phm_unforce_dpm_levels(hwmgr);
1217                 break;
1218         case AMD_DPM_FORCED_LEVEL_MANUAL:
1219         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1220         default:
1221                 break;
1222         }
1223
1224         return ret;
1225 }
1226
1227 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
1228 {
1229         if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
1230                 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
1231         return 0;
1232 }
1233
1234 static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1235 {
1236         if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
1237                 return smum_send_msg_to_smc_with_parameter(
1238                         hwmgr,
1239                         PPSMC_MSG_UVDPowerON,
1240                         PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
1241         }
1242
1243         return 0;
1244 }
1245
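/*
 * Program the ECLK hard minimum for VCE: the highest dependency table entry
 * when Stable P-State or a UMD pstate is active, otherwise 0, in which case
 * the ECLK soft minimum is raised to level 1 so ECLK DPM 0 is not used.
 */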
1246 static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1247 {
1248         struct smu8_hwmgr *data = hwmgr->backend;
1249         struct phm_vce_clock_voltage_dependency_table *ptable =
1250                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1251
1252         /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
1253         if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1254             hwmgr->en_umd_pstate) {
1255                 data->vce_dpm.hard_min_clk =
1256                                   ptable->entries[ptable->count - 1].ecclk;
1257
1258                 smum_send_msg_to_smc_with_parameter(hwmgr,
1259                         PPSMC_MSG_SetEclkHardMin,
1260                         smu8_get_eclk_level(hwmgr,
1261                                 data->vce_dpm.hard_min_clk,
1262                                 PPSMC_MSG_SetEclkHardMin));
1263         } else {
1264
1265                 smum_send_msg_to_smc_with_parameter(hwmgr,
1266                                         PPSMC_MSG_SetEclkHardMin, 0);
1267                 /* disable ECLK DPM 0. Otherwise VCE could hang if
1268                  * switching SCLK from DPM 0 to 6/7 */
1269                 smum_send_msg_to_smc_with_parameter(hwmgr,
1270                                         PPSMC_MSG_SetEclkSoftMin, 1);
1271         }
1272         return 0;
1273 }
1274
1275 static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1276 {
1277         if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1278                 return smum_send_msg_to_smc(hwmgr,
1279                                                      PPSMC_MSG_VCEPowerOFF);
1280         return 0;
1281 }
1282
1283 static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1284 {
1285         if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1286                 return smum_send_msg_to_smc(hwmgr,
1287                                                      PPSMC_MSG_VCEPowerON);
1288         return 0;
1289 }
1290
1291 static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1292 {
1293         struct smu8_hwmgr *data = hwmgr->backend;
1294
1295         return data->sys_info.bootup_uma_clock;
1296 }
1297
1298 static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1299 {
1300         struct pp_power_state  *ps;
1301         struct smu8_power_state  *smu8_ps;
1302
1303         if (hwmgr == NULL)
1304                 return -EINVAL;
1305
1306         ps = hwmgr->request_ps;
1307
1308         if (ps == NULL)
1309                 return -EINVAL;
1310
1311         smu8_ps = cast_smu8_power_state(&ps->hardware);
1312
1313         if (low)
1314                 return smu8_ps->levels[0].engineClock;
1315         else
1316                 return smu8_ps->levels[smu8_ps->level-1].engineClock;
1317 }
1318
1319 static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
1320                                         struct pp_hw_power_state *hw_ps)
1321 {
1322         struct smu8_hwmgr *data = hwmgr->backend;
1323         struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1324
1325         smu8_ps->level = 1;
1326         smu8_ps->nbps_flags = 0;
1327         smu8_ps->bapm_flags = 0;
1328         smu8_ps->levels[0] = data->boot_power_level;
1329
1330         return 0;
1331 }
1332
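/*
 * Callback for pp_tables_get_entry(): convert one ATOM_PPLIB_CZ_CLOCK_INFO
 * entry into a performance level, clamping the clock index to the number of
 * hardware activity performance levels.
 */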
1333 static int smu8_dpm_get_pp_table_entry_callback(
1334                                                      struct pp_hwmgr *hwmgr,
1335                                            struct pp_hw_power_state *hw_ps,
1336                                                           unsigned int index,
1337                                                      const void *clock_info)
1338 {
1339         struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1340
1341         const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;
1342
1343         struct phm_clock_voltage_dependency_table *table =
1344                                     hwmgr->dyn_state.vddc_dependency_on_sclk;
1345         uint8_t clock_info_index = smu8_clock_info->index;
1346
1347         if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
1348                 clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
1349
1350         smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
1351         smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
1352
1353         smu8_ps->level = index + 1;
1354
1355         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
1356                 smu8_ps->levels[index].dsDividerIndex = 5;
1357                 smu8_ps->levels[index].ssDividerIndex = 5;
1358         }
1359
1360         return 0;
1361 }
1362
1363 static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
1364 {
1365         int result;
1366         unsigned long ret = 0;
1367
1368         result = pp_tables_get_num_of_entries(hwmgr, &ret);
1369
1370         return result ? 0 : ret;
1371 }
1372
1373 static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
1374                     unsigned long entry, struct pp_power_state *ps)
1375 {
1376         int result;
1377         struct smu8_power_state *smu8_ps;
1378
1379         ps->hardware.magic = smu8_magic;
1380
1381         smu8_ps = cast_smu8_power_state(&(ps->hardware));
1382
1383         result = pp_tables_get_entry(hwmgr, entry, ps,
1384                         smu8_dpm_get_pp_table_entry_callback);
1385
1386         smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
1387         smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
1388
1389         return result;
1390 }
1391
1392 static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
1393 {
1394         return sizeof(struct smu8_power_state);
1395 }
1396
1397 static void smu8_hw_print_display_cfg(
1398         const struct cc6_settings *cc6_settings)
1399 {
1400         PP_DBG_LOG("New Display Configuration:\n");
1401
1402         PP_DBG_LOG("   cpu_cc6_disable: %d\n",
1403                         cc6_settings->cpu_cc6_disable);
1404         PP_DBG_LOG("   cpu_pstate_disable: %d\n",
1405                         cc6_settings->cpu_pstate_disable);
1406         PP_DBG_LOG("   nb_pstate_switch_disable: %d\n",
1407                         cc6_settings->nb_pstate_switch_disable);
1408         PP_DBG_LOG("   cpu_pstate_separation_time: %d\n\n",
1409                         cc6_settings->cpu_pstate_separation_time);
1410 }
1411
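/*
 * Push the cached CC6/CPU p-state constraints to the SMU, packed into a
 * single SetDisplaySizePowerParams argument.  The message is only sent
 * when smu8_store_cc6_data() flagged a change.
 */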
1412 static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
1413 {
1414         struct smu8_hwmgr *hw_data = hwmgr->backend;
1415         uint32_t data = 0;
1416
1417         if (hw_data->cc6_settings.cc6_setting_changed) {
1418
1419                 hw_data->cc6_settings.cc6_setting_changed = false;
1420
1421                 smu8_hw_print_display_cfg(&hw_data->cc6_settings);
1422
1423                 data |= (hw_data->cc6_settings.cpu_pstate_separation_time
1424                         & PWRMGT_SEPARATION_TIME_MASK)
1425                         << PWRMGT_SEPARATION_TIME_SHIFT;
1426
1427                 data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
1428                         << PWRMGT_DISABLE_CPU_CSTATES_SHIFT;
1429
1430                 data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
1431                         << PWRMGT_DISABLE_CPU_PSTATES_SHIFT;
1432
1433                 PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
1434                         data);
1435
1436                 smum_send_msg_to_smc_with_parameter(hwmgr,
1437                                                 PPSMC_MSG_SetDisplaySizePowerParams,
1438                                                 data);
1439         }
1440
1441         return 0;
1442 }
1443
1444
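/*
 * Cache the CC6/CPU p-state constraints handed down by the display
 * stack; the actual SMU update is deferred to smu8_set_cpu_power_state().
 */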
1445 static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
1446                         bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
1447 {
1448         struct smu8_hwmgr *hw_data = hwmgr->backend;
1449
1450         if (separation_time !=
1451             hw_data->cc6_settings.cpu_pstate_separation_time ||
1452             cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
1453             pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
1454             pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
1455
1456                 hw_data->cc6_settings.cc6_setting_changed = true;
1457
1458                 hw_data->cc6_settings.cpu_pstate_separation_time =
1459                         separation_time;
1460                 hw_data->cc6_settings.cpu_cc6_disable =
1461                         cc6_disable;
1462                 hw_data->cc6_settings.cpu_pstate_disable =
1463                         pstate_disable;
1464                 hw_data->cc6_settings.nb_pstate_switch_disable =
1465                         pstate_switch_disable;
1466
1467         }
1468
1469         return 0;
1470 }
1471
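/*
 * Report the limits used by DAL: maximum engine/memory clock from the AC
 * limits, plus the highest entry in the vddc-vs-DAL-power-level table
 * whose voltage requirement fits under the AC VDDC limit (entry 0 is
 * never selected).
 */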
1472 static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
1473                 struct amd_pp_simple_clock_info *info)
1474 {
1475         uint32_t i;
1476         const struct phm_clock_voltage_dependency_table *table =
1477                         hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
1478         const struct phm_clock_and_voltage_limits *limits =
1479                         &hwmgr->dyn_state.max_clock_voltage_on_ac;
1480
1481         info->engine_max_clock = limits->sclk;
1482         info->memory_max_clock = limits->mclk;
1483
1484         for (i = table->count - 1; i > 0; i--) {
1485                 if (limits->vddc >= table->entries[i].v) {
1486                         info->level = table->entries[i].clk;
1487                         return 0;
1488                 }
1489         }
1490         return -EINVAL;
1491 }
1492
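/*
 * Pin the sclk range by sending the caller's level mask as both the sclk
 * soft minimum and soft maximum; other clock types are not forced here.
 */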
1493 static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
1494                 enum pp_clock_type type, uint32_t mask)
1495 {
1496         switch (type) {
1497         case PP_SCLK:
1498                 smum_send_msg_to_smc_with_parameter(hwmgr,
1499                                 PPSMC_MSG_SetSclkSoftMin,
1500                                 mask);
1501                 smum_send_msg_to_smc_with_parameter(hwmgr,
1502                                 PPSMC_MSG_SetSclkSoftMax,
1503                                 mask);
1504                 break;
1505         default:
1506                 break;
1507         }
1508
1509         return 0;
1510 }
1511
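/*
 * Back the pp_dpm_sclk/pp_dpm_mclk sysfs files: read the currently
 * selected index from TARGET_AND_CURRENT_PROFILE_INDEX and list every
 * level, marking the active one.  Clock table values are kept in 10 kHz
 * units, hence the division by 100 to print MHz; the NB memory clock
 * table is walked in reverse order.
 */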
1512 static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
1513                 enum pp_clock_type type, char *buf)
1514 {
1515         struct smu8_hwmgr *data = hwmgr->backend;
1516         struct phm_clock_voltage_dependency_table *sclk_table =
1517                         hwmgr->dyn_state.vddc_dependency_on_sclk;
1518         int i, now, size = 0;
1519
1520         switch (type) {
1521         case PP_SCLK:
1522                 now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
1523                                 CGS_IND_REG__SMC,
1524                                 ixTARGET_AND_CURRENT_PROFILE_INDEX),
1525                                 TARGET_AND_CURRENT_PROFILE_INDEX,
1526                                 CURR_SCLK_INDEX);
1527
1528                 for (i = 0; i < sclk_table->count; i++)
1529                         size += sprintf(buf + size, "%d: %uMhz %s\n",
1530                                         i, sclk_table->entries[i].clk / 100,
1531                                         (i == now) ? "*" : "");
1532                 break;
1533         case PP_MCLK:
1534                 now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
1535                                 CGS_IND_REG__SMC,
1536                                 ixTARGET_AND_CURRENT_PROFILE_INDEX),
1537                                 TARGET_AND_CURRENT_PROFILE_INDEX,
1538                                 CURR_MCLK_INDEX);
1539
1540                 for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
1541                         size += sprintf(buf + size, "%d: %uMhz %s\n",
1542                                         SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
1543                                         (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
1544                 break;
1545         default:
1546                 break;
1547         }
1548         return size;
1549 }
1550
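/*
 * Report a single performance level: the engine clock of the requested
 * (clamped) level, bumped to the first level above the DCE slow-sclk
 * threshold for power-containment queries.  Level 0 maps to the last NB
 * memory clock entry, all other levels to the first, and VDDC is derived
 * from the level's 8-bit voltage index.
 */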
1551 static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
1552                                 PHM_PerformanceLevelDesignation designation, uint32_t index,
1553                                 PHM_PerformanceLevel *level)
1554 {
1555         const struct smu8_power_state *ps;
1556         struct smu8_hwmgr *data;
1557         uint32_t level_index;
1558         uint32_t i;
1559
1560         if (level == NULL || hwmgr == NULL || state == NULL)
1561                 return -EINVAL;
1562
1563         data = hwmgr->backend;
1564         ps = cast_const_smu8_power_state(state);
1565
1566         level_index = index > ps->level - 1 ? ps->level - 1 : index;
1567         level->coreClock = ps->levels[level_index].engineClock;
1568
1569         if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
1570                 for (i = 1; i < ps->level; i++) {
1571                         if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
1572                                 level->coreClock = ps->levels[i].engineClock;
1573                                 break;
1574                         }
1575                 }
1576         }
1577
1578         if (level_index == 0)
1579                 level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
1580         else
1581                 level->memory_clock = data->sys_info.nbp_memory_clock[0];
1582
1583         level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
1584         level->nonLocalMemoryFreq = 0;
1585         level->nonLocalMemoryWidth = 0;
1586
1587         return 0;
1588 }
1589
1590 static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
1591         const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
1592 {
1593         const struct smu8_power_state *ps = cast_const_smu8_power_state(state);
1594
1595         clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
1596         clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
1597
1598         return 0;
1599 }
1600
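/*
 * Return the display, system (sclk) or NB memory clock list.  The count
 * comes from smu8_get_max_sclk_level(), except for memory clocks, which
 * always report SMU8_NUM_NBPMEMORYCLOCK entries in reverse table order;
 * the multiplication by 10 converts the internal 10 kHz units to kHz.
 */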
1601 static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
1602                                                 struct amd_pp_clocks *clocks)
1603 {
1604         struct smu8_hwmgr *data = hwmgr->backend;
1605         int i;
1606         struct phm_clock_voltage_dependency_table *table;
1607
1608         clocks->count = smu8_get_max_sclk_level(hwmgr);
1609         switch (type) {
1610         case amd_pp_disp_clock:
1611                 for (i = 0; i < clocks->count; i++)
1612                         clocks->clock[i] = data->sys_info.display_clock[i] * 10;
1613                 break;
1614         case amd_pp_sys_clock:
1615                 table = hwmgr->dyn_state.vddc_dependency_on_sclk;
1616                 for (i = 0; i < clocks->count; i++)
1617                         clocks->clock[i] = table->entries[i].clk * 10;
1618                 break;
1619         case amd_pp_mem_clock:
1620                 clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
1621                 for (i = 0; i < clocks->count; i++)
1622                         clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
1623                 break;
1624         default:
1625                 return -EINVAL;
1626         }
1627
1628         return 0;
1629 }
1630
1631 static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
1632 {
1633         struct phm_clock_voltage_dependency_table *table =
1634                                         hwmgr->dyn_state.vddc_dependency_on_sclk;
1635         unsigned long level;
1636         const struct phm_clock_and_voltage_limits *limits =
1637                         &hwmgr->dyn_state.max_clock_voltage_on_ac;
1638
1639         if (!table || table->count == 0 || !clocks)
1640                 return -EINVAL;
1641
1642         level = smu8_get_max_sclk_level(hwmgr) - 1;
1643
1644         if (level < table->count)
1645                 clocks->engine_max_clock = table->entries[level].clk;
1646         else
1647                 clocks->engine_max_clock = table->entries[table->count - 1].clk;
1648
1649         clocks->memory_max_clock = limits->mclk;
1650
1651         return 0;
1652 }
1653
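/*
 * Read the current temperature from THM_TCON_CUR_TMP.  CUR_TEMP counts
 * in 1/8 degree steps and carries a -49 degree offset when the extended
 * range bit is set; the result is scaled by
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */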
1654 static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1655 {
1656         int actual_temp = 0;
1657         uint32_t val = cgs_read_ind_register(hwmgr->device,
1658                                              CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
1659         uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
1660
1661         if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
1662                 actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1663         else
1664                 actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1665
1666         return actual_temp;
1667 }
1668
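/*
 * Sensor backend for amdgpu_pm.  The current sclk/UVD/VCE indices come
 * from TARGET_AND_CURRENT_PROFILE_INDEX(_2), the NB/GFX voltages from
 * the SVI current-VID registers, and the GPU load from the SMU (with a
 * 50% fallback if the query fails).  Power-gated UVD/VCE report a clock
 * of 0, and every sensor writes exactly 4 bytes.
 */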
1669 static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1670                           void *value, int *size)
1671 {
1672         struct smu8_hwmgr *data = hwmgr->backend;
1673
1674         struct phm_clock_voltage_dependency_table *table =
1675                                 hwmgr->dyn_state.vddc_dependency_on_sclk;
1676
1677         struct phm_vce_clock_voltage_dependency_table *vce_table =
1678                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1679
1680         struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1681                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1682
1683         uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
1684                                         TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
1685         uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1686                                         TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
1687         uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1688                                         TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
1689
1690         uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
1691         uint16_t vddnb, vddgfx;
1692         int result;
1693
1694         /* size must be at least 4 bytes for all sensors */
1695         if (*size < 4)
1696                 return -EINVAL;
1697         *size = 4;
1698
1699         switch (idx) {
1700         case AMDGPU_PP_SENSOR_GFX_SCLK:
1701                 if (sclk_index < NUM_SCLK_LEVELS) {
1702                         sclk = table->entries[sclk_index].clk;
1703                         *((uint32_t *)value) = sclk;
1704                         return 0;
1705                 }
1706                 return -EINVAL;
1707         case AMDGPU_PP_SENSOR_VDDNB:
1708                 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
1709                         CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
1710                 vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
1711                 *((uint32_t *)value) = vddnb;
1712                 return 0;
1713         case AMDGPU_PP_SENSOR_VDDGFX:
1714                 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
1715                         CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
1716                 vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
1717                 *((uint32_t *)value) = vddgfx;
1718                 return 0;
1719         case AMDGPU_PP_SENSOR_UVD_VCLK:
1720                 if (!data->uvd_power_gated) {
1721                         if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1722                                 return -EINVAL;
1723                         } else {
1724                                 vclk = uvd_table->entries[uvd_index].vclk;
1725                                 *((uint32_t *)value) = vclk;
1726                                 return 0;
1727                         }
1728                 }
1729                 *((uint32_t *)value) = 0;
1730                 return 0;
1731         case AMDGPU_PP_SENSOR_UVD_DCLK:
1732                 if (!data->uvd_power_gated) {
1733                         if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1734                                 return -EINVAL;
1735                         } else {
1736                                 dclk = uvd_table->entries[uvd_index].dclk;
1737                                 *((uint32_t *)value) = dclk;
1738                                 return 0;
1739                         }
1740                 }
1741                 *((uint32_t *)value) = 0;
1742                 return 0;
1743         case AMDGPU_PP_SENSOR_VCE_ECCLK:
1744                 if (!data->vce_power_gated) {
1745                         if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1746                                 return -EINVAL;
1747                         } else {
1748                                 ecclk = vce_table->entries[vce_index].ecclk;
1749                                 *((uint32_t *)value) = ecclk;
1750                                 return 0;
1751                         }
1752                 }
1753                 *((uint32_t *)value) = 0;
1754                 return 0;
1755         case AMDGPU_PP_SENSOR_GPU_LOAD:
1756                 result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
1757                 if (!result) {
1758                         activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
1759                         activity_percent = activity_percent > 100 ? 100 : activity_percent;
1760                 } else {
1761                         activity_percent = 50;
1762                 }
1763                 *((uint32_t *)value) = activity_percent;
1764                 return 0;
1765         case AMDGPU_PP_SENSOR_UVD_POWER:
1766                 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1767                 return 0;
1768         case AMDGPU_PP_SENSOR_VCE_POWER:
1769                 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1770                 return 0;
1771         case AMDGPU_PP_SENSOR_GPU_TEMP:
1772                 *((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
1773                 return 0;
1774         default:
1775                 return -EINVAL;
1776         }
1777 }
1778
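/*
 * Hand the CAC buffer over to the SMU: the MC address is passed through
 * the DramAddr*Virtual messages and the CPU virtual address through the
 * DramAddr*Physical ones, followed by the buffer size.
 */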
1779 static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
1780                                         uint32_t virtual_addr_low,
1781                                         uint32_t virtual_addr_hi,
1782                                         uint32_t mc_addr_low,
1783                                         uint32_t mc_addr_hi,
1784                                         uint32_t size)
1785 {
1786         smum_send_msg_to_smc_with_parameter(hwmgr,
1787                                         PPSMC_MSG_DramAddrHiVirtual,
1788                                         mc_addr_hi);
1789         smum_send_msg_to_smc_with_parameter(hwmgr,
1790                                         PPSMC_MSG_DramAddrLoVirtual,
1791                                         mc_addr_low);
1792         smum_send_msg_to_smc_with_parameter(hwmgr,
1793                                         PPSMC_MSG_DramAddrHiPhysical,
1794                                         virtual_addr_hi);
1795         smum_send_msg_to_smc_with_parameter(hwmgr,
1796                                         PPSMC_MSG_DramAddrLoPhysical,
1797                                         virtual_addr_low);
1798
1799         smum_send_msg_to_smc_with_parameter(hwmgr,
1800                                         PPSMC_MSG_DramBufferSize,
1801                                         size);
1802         return 0;
1803 }
1804
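/*
 * Start from the shared SMU7ThermalPolicy defaults and raise the upper
 * limit to the auto-throttling threshold plus the HTC hysteresis.
 */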
1805 static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
1806                 struct PP_TemperatureRange *thermal_data)
1807 {
1808         struct smu8_hwmgr *data = hwmgr->backend;
1809
1810         memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
1811
1812         thermal_data->max = (data->thermal_auto_throttling_treshold +
1813                         data->sys_info.htc_hyst_lmt) *
1814                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1815
1816         return 0;
1817 }
1818
1819 static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
1820 {
1821         struct smu8_hwmgr *data = hwmgr->backend;
1822         uint32_t dpm_features = 0;
1823
1824         if (enable &&
1825                 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1826                                   PHM_PlatformCaps_UVDDPM)) {
1827                 data->dpm_flags |= DPMFlags_UVD_Enabled;
1828                 dpm_features |= UVD_DPM_MASK;
1829                 smum_send_msg_to_smc_with_parameter(hwmgr,
1830                             PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
1831         } else {
1832                 dpm_features |= UVD_DPM_MASK;
1833                 data->dpm_flags &= ~DPMFlags_UVD_Enabled;
1834                 smum_send_msg_to_smc_with_parameter(hwmgr,
1835                            PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
1836         }
1837         return 0;
1838 }
1839
1840 int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
1841 {
1842         struct smu8_hwmgr *data = hwmgr->backend;
1843         struct phm_uvd_clock_voltage_dependency_table *ptable =
1844                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1845
1846         if (!bgate) {
1847                 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
1848                 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1849                     hwmgr->en_umd_pstate) {
1850                         data->uvd_dpm.hard_min_clk =
1851                                    ptable->entries[ptable->count - 1].vclk;
1852
1853                         smum_send_msg_to_smc_with_parameter(hwmgr,
1854                                 PPSMC_MSG_SetUvdHardMin,
1855                                 smu8_get_uvd_level(hwmgr,
1856                                         data->uvd_dpm.hard_min_clk,
1857                                         PPSMC_MSG_SetUvdHardMin));
1858                 }
1859 
1860                 /* UVD DPM is (re)enabled whether or not the stable-pstate
1861                  * hard minimum was raised above */
1862                 smu8_enable_disable_uvd_dpm(hwmgr, true);
1863         } else {
1864                 smu8_enable_disable_uvd_dpm(hwmgr, false);
1865         }
1866
1867         return 0;
1868 }
1869
1870 static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1871 {
1872         struct smu8_hwmgr *data = hwmgr->backend;
1873         uint32_t dpm_features = 0;
1874
1875         if (enable && phm_cap_enabled(
1876                                 hwmgr->platform_descriptor.platformCaps,
1877                                 PHM_PlatformCaps_VCEDPM)) {
1878                 data->dpm_flags |= DPMFlags_VCE_Enabled;
1879                 dpm_features |= VCE_DPM_MASK;
1880                 smum_send_msg_to_smc_with_parameter(hwmgr,
1881                             PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
1882         } else {
1883                 dpm_features |= VCE_DPM_MASK;
1884                 data->dpm_flags &= ~DPMFlags_VCE_Enabled;
1885                 smum_send_msg_to_smc_with_parameter(hwmgr,
1886                            PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
1887         }
1888
1889         return 0;
1890 }
1891
1892
1893 static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
1894 {
1895         struct smu8_hwmgr *data = hwmgr->backend;
1896
1897         if (data->acp_power_gated == bgate)
1898                 return;
1899         data->acp_power_gated = bgate;
1900         if (bgate)
1901                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
1902         else
1903                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON);
1904 }
1905
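/*
 * UVD power gating: when gating, engage power and clock gating, drop
 * UVD DPM and power the block down; ungating powers the block back up,
 * lifts the gating and re-enables UVD DPM.
 */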
1906 static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
1907 {
1908         struct smu8_hwmgr *data = hwmgr->backend;
1909
1910         data->uvd_power_gated = bgate;
1911
1912         if (bgate) {
1913                 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1914                                                 AMD_IP_BLOCK_TYPE_UVD,
1915                                                 AMD_PG_STATE_GATE);
1916                 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1917                                                 AMD_IP_BLOCK_TYPE_UVD,
1918                                                 AMD_CG_STATE_GATE);
1919                 smu8_dpm_update_uvd_dpm(hwmgr, true);
1920                 smu8_dpm_powerdown_uvd(hwmgr);
1921         } else {
1922                 smu8_dpm_powerup_uvd(hwmgr);
1923                 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1924                                                 AMD_IP_BLOCK_TYPE_UVD,
1925                                                 AMD_CG_STATE_UNGATE);
1926                 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1927                                                 AMD_IP_BLOCK_TYPE_UVD,
1928                                                 AMD_PG_STATE_UNGATE);
1929                 smu8_dpm_update_uvd_dpm(hwmgr, false);
1930         }
1931
1932 }
1933
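/*
 * VCE power gating, mirroring the UVD path: gate and power down the
 * block with VCE DPM disabled, or power it up, ungate and restore the
 * VCE DPM level.
 */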
1934 static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
1935 {
1936         struct smu8_hwmgr *data = hwmgr->backend;
1937
1938         if (bgate) {
1939                 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1940                                         AMD_IP_BLOCK_TYPE_VCE,
1941                                         AMD_PG_STATE_GATE);
1942                 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1943                                         AMD_IP_BLOCK_TYPE_VCE,
1944                                         AMD_CG_STATE_GATE);
1945                 smu8_enable_disable_vce_dpm(hwmgr, false);
1946                 smu8_dpm_powerdown_vce(hwmgr);
1947                 data->vce_power_gated = true;
1948         } else {
1949                 smu8_dpm_powerup_vce(hwmgr);
1950                 data->vce_power_gated = false;
1951                 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1952                                         AMD_IP_BLOCK_TYPE_VCE,
1953                                         AMD_CG_STATE_UNGATE);
1954                 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1955                                         AMD_IP_BLOCK_TYPE_VCE,
1956                                         AMD_PG_STATE_UNGATE);
1957                 smu8_dpm_update_vce_dpm(hwmgr);
1958                 smu8_enable_disable_vce_dpm(hwmgr, true);
1959         }
1960 }
1961
1962 static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
1963         .backend_init = smu8_hwmgr_backend_init,
1964         .backend_fini = smu8_hwmgr_backend_fini,
1965         .apply_state_adjust_rules = smu8_apply_state_adjust_rules,
1966         .force_dpm_level = smu8_dpm_force_dpm_level,
1967         .get_power_state_size = smu8_get_power_state_size,
1968         .powerdown_uvd = smu8_dpm_powerdown_uvd,
1969         .powergate_uvd = smu8_dpm_powergate_uvd,
1970         .powergate_vce = smu8_dpm_powergate_vce,
1971         .powergate_acp = smu8_dpm_powergate_acp,
1972         .get_mclk = smu8_dpm_get_mclk,
1973         .get_sclk = smu8_dpm_get_sclk,
1974         .patch_boot_state = smu8_dpm_patch_boot_state,
1975         .get_pp_table_entry = smu8_dpm_get_pp_table_entry,
1976         .get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
1977         .set_cpu_power_state = smu8_set_cpu_power_state,
1978         .store_cc6_data = smu8_store_cc6_data,
1979         .force_clock_level = smu8_force_clock_level,
1980         .print_clock_levels = smu8_print_clock_levels,
1981         .get_dal_power_level = smu8_get_dal_power_level,
1982         .get_performance_level = smu8_get_performance_level,
1983         .get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
1984         .get_clock_by_type = smu8_get_clock_by_type,
1985         .get_max_high_clocks = smu8_get_max_high_clocks,
1986         .read_sensor = smu8_read_sensor,
1987         .power_off_asic = smu8_power_off_asic,
1988         .asic_setup = smu8_setup_asic_task,
1989         .dynamic_state_management_enable = smu8_enable_dpm_tasks,
1990         .power_state_set = smu8_set_power_state_tasks,
1991         .dynamic_state_management_disable = smu8_disable_dpm_tasks,
1992         .notify_cac_buffer_info = smu8_notify_cac_buffer_info,
1993         .update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable,
1994         .get_thermal_temperature_range = smu8_get_thermal_temperature_range,
1995 };
1996
1997 int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
1998 {
1999         hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
2000         hwmgr->pptable_func = &pptable_funcs;
2001         return 0;
2002 }