drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/fb.h>
 26#include <linux/module.h>
 27#include <linux/pci.h>
 28#include <linux/slab.h>
29
30#include "hwmgr.h"
31#include "amd_powerplay.h"
32#include "hardwaremanager.h"
33#include "ppatomfwctrl.h"
34#include "atomfirmware.h"
35#include "cgs_common.h"
36#include "vega10_powertune.h"
37#include "smu9.h"
38#include "smu9_driver_if.h"
39#include "vega10_inc.h"
 40#include "soc15_common.h"
41#include "pppcielanes.h"
42#include "vega10_hwmgr.h"
 43#include "vega10_smumgr.h"
44#include "vega10_processpptables.h"
45#include "vega10_pptable.h"
46#include "vega10_thermal.h"
47#include "pp_debug.h"
 48#include "amd_pcie_helpers.h"
 49#include "ppinterrupt.h"
 50#include "pp_overdriver.h"
 51#include "pp_thermal.h"
 52#include "vega10_baco.h"
 53
54#include "smuio/smuio_9_0_offset.h"
55#include "smuio/smuio_9_0_sh_mask.h"
56
57#define HBM_MEMORY_CHANNEL_WIDTH 128
58
 59static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
 60
61#define mmDF_CS_AON0_DramBaseAddress0 0x0044
62#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
63
64//DF_CS_AON0_DramBaseAddress0
65#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
66#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
67#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
68#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
69#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
70#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
71#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
72#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
73#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
74#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
75
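/*
 * The IntLvNumChan field of DF_CS_AON0_DramBaseAddress0 is read in
 * vega10_hwmgr_backend_init() below to determine the memory channel
 * configuration; the decoded value is bounds-checked against the
 * channel_number[] table above.
 */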
76typedef enum {
77 CLK_SMNCLK = 0,
78 CLK_SOCCLK,
79 CLK_MP0CLK,
80 CLK_MP1CLK,
81 CLK_LCLK,
82 CLK_DCEFCLK,
83 CLK_VCLK,
84 CLK_DCLK,
85 CLK_ECLK,
86 CLK_UCLK,
87 CLK_GFXCLK,
88 CLK_COUNT,
89} CLOCK_ID_e;
90
 91static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
92
93struct vega10_power_state *cast_phw_vega10_power_state(
94 struct pp_hw_power_state *hw_ps)
95{
96 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
97 "Invalid Powerstate Type!",
98 return NULL;);
99
100 return (struct vega10_power_state *)hw_ps;
101}
102
103const struct vega10_power_state *cast_const_phw_vega10_power_state(
104 const struct pp_hw_power_state *hw_ps)
105{
106 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
107 "Invalid Powerstate Type!",
108 return NULL;);
109
110 return (const struct vega10_power_state *)hw_ps;
111}
112
113static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
114{
 115 struct vega10_hwmgr *data = hwmgr->backend;
116
117 data->registry_data.sclk_dpm_key_disabled =
118 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
119 data->registry_data.socclk_dpm_key_disabled =
120 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
121 data->registry_data.mclk_dpm_key_disabled =
122 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
123 data->registry_data.pcie_dpm_key_disabled =
124 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
125
126 data->registry_data.dcefclk_dpm_key_disabled =
127 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
128
129 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
130 data->registry_data.power_containment_support = 1;
131 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
132 data->registry_data.enable_tdc_limit_feature = 1;
133 }
134
 135 data->registry_data.clock_stretcher_support =
 136 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
 137
138 data->registry_data.ulv_support =
139 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
140
141 data->registry_data.sclk_deep_sleep_support =
142 hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
 143
144 data->registry_data.disable_water_mark = 0;
145
146 data->registry_data.fan_control_support = 1;
147 data->registry_data.thermal_support = 1;
148 data->registry_data.fw_ctf_enabled = 1;
149
150 data->registry_data.avfs_support =
151 hwmgr->feature_mask & PP_AVFS_MASK ? true : false;
152 data->registry_data.led_dpm_enabled = 1;
153
154 data->registry_data.vr0hot_enabled = 1;
155 data->registry_data.vr1hot_enabled = 1;
156 data->registry_data.regulator_hot_gpio_support = 1;
157
158 data->registry_data.didt_support = 1;
159 if (data->registry_data.didt_support) {
160 data->registry_data.didt_mode = 6;
161 data->registry_data.sq_ramping_support = 1;
162 data->registry_data.db_ramping_support = 0;
163 data->registry_data.td_ramping_support = 0;
164 data->registry_data.tcp_ramping_support = 0;
165 data->registry_data.dbr_ramping_support = 0;
166 data->registry_data.edc_didt_support = 1;
167 data->registry_data.gc_didt_support = 0;
168 data->registry_data.psm_didt_support = 0;
169 }
170
171 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
172 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
173 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
174 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
175 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
176 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
177 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
178 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
179 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
180 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
181 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
182 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
183 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
184
185 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
186 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
187 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
188 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
189}
190
191static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
192{
 193 struct vega10_hwmgr *data = hwmgr->backend;
194 struct phm_ppt_v2_information *table_info =
195 (struct phm_ppt_v2_information *)hwmgr->pptable;
 196 struct amdgpu_device *adev = hwmgr->adev;
197
198 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
199 PHM_PlatformCaps_SclkDeepSleep);
200
201 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
202 PHM_PlatformCaps_DynamicPatchPowerState);
203
204 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
205 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
206 PHM_PlatformCaps_ControlVDDCI);
207
208 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
209 PHM_PlatformCaps_EnableSMU7ThermalManagement);
210
 211 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
212 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
213 PHM_PlatformCaps_UVDPowerGating);
214
 215 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
216 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
217 PHM_PlatformCaps_VCEPowerGating);
218
219 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
220 PHM_PlatformCaps_UnTabledHardwareInterface);
221
222 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
223 PHM_PlatformCaps_FanSpeedInTableIsRPM);
224
225 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
226 PHM_PlatformCaps_ODFuzzyFanControlSupport);
227
228 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
229 PHM_PlatformCaps_DynamicPowerManagement);
230
231 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
232 PHM_PlatformCaps_SMC);
233
234 /* power tune caps */
235 /* assume disabled */
236 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
237 PHM_PlatformCaps_PowerContainment);
238 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
239 PHM_PlatformCaps_DiDtSupport);
240 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
241 PHM_PlatformCaps_SQRamping);
242 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
243 PHM_PlatformCaps_DBRamping);
244 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
245 PHM_PlatformCaps_TDRamping);
246 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
247 PHM_PlatformCaps_TCPRamping);
248 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
249 PHM_PlatformCaps_DBRRamping);
250 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
251 PHM_PlatformCaps_DiDtEDCEnable);
252 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
253 PHM_PlatformCaps_GCEDC);
254 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
255 PHM_PlatformCaps_PSM);
256
257 if (data->registry_data.didt_support) {
258 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
259 if (data->registry_data.sq_ramping_support)
260 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
261 if (data->registry_data.db_ramping_support)
262 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
263 if (data->registry_data.td_ramping_support)
264 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
265 if (data->registry_data.tcp_ramping_support)
266 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
267 if (data->registry_data.dbr_ramping_support)
268 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
269 if (data->registry_data.edc_didt_support)
270 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
271 if (data->registry_data.gc_didt_support)
272 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
273 if (data->registry_data.psm_didt_support)
274 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
275 }
276
277 if (data->registry_data.power_containment_support)
278 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
279 PHM_PlatformCaps_PowerContainment);
280 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
281 PHM_PlatformCaps_CAC);
282
283 if (table_info->tdp_table->usClockStretchAmount &&
284 data->registry_data.clock_stretcher_support)
285 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
286 PHM_PlatformCaps_ClockStretcher);
287
288 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
289 PHM_PlatformCaps_RegulatorHot);
290 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
291 PHM_PlatformCaps_AutomaticDCTransition);
292
293 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
294 PHM_PlatformCaps_UVDDPM);
295 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
296 PHM_PlatformCaps_VCEDPM);
297
298 return 0;
299}
300
301static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
302{
303 struct vega10_hwmgr *data = hwmgr->backend;
304 struct phm_ppt_v2_information *table_info =
305 (struct phm_ppt_v2_information *)(hwmgr->pptable);
306 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
307 struct vega10_odn_vddc_lookup_table *od_lookup_table;
308 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
309 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
310 struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
 311 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
 312 uint32_t i;
313 int result;
314
315 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
316 if (!result) {
317 data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
318 data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
319 }
320
321 od_lookup_table = &odn_table->vddc_lookup_table;
322 vddc_lookup_table = table_info->vddc_lookup_table;
323
324 for (i = 0; i < vddc_lookup_table->count; i++)
325 od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;
326
327 od_lookup_table->count = vddc_lookup_table->count;
328
329 dep_table[0] = table_info->vdd_dep_on_sclk;
330 dep_table[1] = table_info->vdd_dep_on_mclk;
331 dep_table[2] = table_info->vdd_dep_on_socclk;
332 od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
333 od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
334 od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;
335
336 for (i = 0; i < 3; i++)
337 smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);
338
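 /*
  * Clamp the overdrive VDDC range to sane values: 0 or anything above
  * 2000 (the 2 V ceiling also noted in vega10_get_evv_voltages()) is
  * treated as invalid and falls back to the bounds of the SCLK
  * dependency table.
  */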
339 if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
340 odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
341 if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
342 odn_table->min_vddc = dep_table[0]->entries[0].vddc;
343
344 i = od_table[2]->count - 1;
345 od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
346 hwmgr->platform_descriptor.overdriveLimit.memoryClock :
347 od_table[2]->entries[i].clk;
348 od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
349 odn_table->max_vddc :
350 od_table[2]->entries[i].vddc;
351
352 return 0;
353}
354
355static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
356{
 357 struct vega10_hwmgr *data = hwmgr->backend;
 358 int i;
 359 uint32_t sub_vendor_id, hw_revision;
 360 uint32_t top32, bottom32;
 361 struct amdgpu_device *adev = hwmgr->adev;
362
363 vega10_initialize_power_tune_defaults(hwmgr);
364
365 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
366 data->smu_features[i].smu_feature_id = 0xffff;
367 data->smu_features[i].smu_feature_bitmap = 1 << i;
368 data->smu_features[i].enabled = false;
369 data->smu_features[i].supported = false;
370 }
371
372 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
373 FEATURE_DPM_PREFETCHER_BIT;
374 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
375 FEATURE_DPM_GFXCLK_BIT;
376 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
377 FEATURE_DPM_UCLK_BIT;
378 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
379 FEATURE_DPM_SOCCLK_BIT;
380 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
381 FEATURE_DPM_UVD_BIT;
382 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
383 FEATURE_DPM_VCE_BIT;
384 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
385 FEATURE_DPM_MP0CLK_BIT;
386 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
387 FEATURE_DPM_LINK_BIT;
388 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
389 FEATURE_DPM_DCEFCLK_BIT;
390 data->smu_features[GNLD_ULV].smu_feature_id =
391 FEATURE_ULV_BIT;
392 data->smu_features[GNLD_AVFS].smu_feature_id =
393 FEATURE_AVFS_BIT;
394 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
395 FEATURE_DS_GFXCLK_BIT;
396 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
397 FEATURE_DS_SOCCLK_BIT;
398 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
399 FEATURE_DS_LCLK_BIT;
400 data->smu_features[GNLD_PPT].smu_feature_id =
401 FEATURE_PPT_BIT;
402 data->smu_features[GNLD_TDC].smu_feature_id =
403 FEATURE_TDC_BIT;
404 data->smu_features[GNLD_THERMAL].smu_feature_id =
405 FEATURE_THERMAL_BIT;
406 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
407 FEATURE_GFX_PER_CU_CG_BIT;
408 data->smu_features[GNLD_RM].smu_feature_id =
409 FEATURE_RM_BIT;
410 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
411 FEATURE_DS_DCEFCLK_BIT;
412 data->smu_features[GNLD_ACDC].smu_feature_id =
413 FEATURE_ACDC_BIT;
414 data->smu_features[GNLD_VR0HOT].smu_feature_id =
415 FEATURE_VR0HOT_BIT;
416 data->smu_features[GNLD_VR1HOT].smu_feature_id =
417 FEATURE_VR1HOT_BIT;
418 data->smu_features[GNLD_FW_CTF].smu_feature_id =
419 FEATURE_FW_CTF_BIT;
420 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
421 FEATURE_LED_DISPLAY_BIT;
422 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
423 FEATURE_FAN_CONTROL_BIT;
 424 data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
 425 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
 426 data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;
427
428 if (!data->registry_data.prefetcher_dpm_key_disabled)
429 data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
430
431 if (!data->registry_data.sclk_dpm_key_disabled)
432 data->smu_features[GNLD_DPM_GFXCLK].supported = true;
433
434 if (!data->registry_data.mclk_dpm_key_disabled)
435 data->smu_features[GNLD_DPM_UCLK].supported = true;
436
437 if (!data->registry_data.socclk_dpm_key_disabled)
438 data->smu_features[GNLD_DPM_SOCCLK].supported = true;
439
 440 if (PP_CAP(PHM_PlatformCaps_UVDDPM))
441 data->smu_features[GNLD_DPM_UVD].supported = true;
442
 443 if (PP_CAP(PHM_PlatformCaps_VCEDPM))
444 data->smu_features[GNLD_DPM_VCE].supported = true;
445
446 if (!data->registry_data.pcie_dpm_key_disabled)
447 data->smu_features[GNLD_DPM_LINK].supported = true;
448
449 if (!data->registry_data.dcefclk_dpm_key_disabled)
450 data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
451
452 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
453 data->registry_data.sclk_deep_sleep_support) {
454 data->smu_features[GNLD_DS_GFXCLK].supported = true;
455 data->smu_features[GNLD_DS_SOCCLK].supported = true;
456 data->smu_features[GNLD_DS_LCLK].supported = true;
 457 data->smu_features[GNLD_DS_DCEFCLK].supported = true;
458 }
459
460 if (data->registry_data.enable_pkg_pwr_tracking_feature)
461 data->smu_features[GNLD_PPT].supported = true;
462
463 if (data->registry_data.enable_tdc_limit_feature)
464 data->smu_features[GNLD_TDC].supported = true;
465
466 if (data->registry_data.thermal_support)
467 data->smu_features[GNLD_THERMAL].supported = true;
468
469 if (data->registry_data.fan_control_support)
470 data->smu_features[GNLD_FAN_CONTROL].supported = true;
471
472 if (data->registry_data.fw_ctf_enabled)
473 data->smu_features[GNLD_FW_CTF].supported = true;
474
475 if (data->registry_data.avfs_support)
476 data->smu_features[GNLD_AVFS].supported = true;
477
478 if (data->registry_data.led_dpm_enabled)
479 data->smu_features[GNLD_LED_DISPLAY].supported = true;
480
481 if (data->registry_data.vr1hot_enabled)
482 data->smu_features[GNLD_VR1HOT].supported = true;
483
484 if (data->registry_data.vr0hot_enabled)
485 data->smu_features[GNLD_VR0HOT].supported = true;
486
 487 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
 488 hwmgr->smu_version = smum_get_argument(hwmgr);
 489 /* ACG firmware has major version 5 */
 490 if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
 491 data->smu_features[GNLD_ACG].supported = true;
492 if (data->registry_data.didt_support)
493 data->smu_features[GNLD_DIDT].supported = true;
494
495 hw_revision = adev->pdev->revision;
496 sub_vendor_id = adev->pdev->subsystem_vendor;
497
498 if ((hwmgr->chip_id == 0x6862 ||
499 hwmgr->chip_id == 0x6861 ||
500 hwmgr->chip_id == 0x6868) &&
501 (hw_revision == 0) &&
502 (sub_vendor_id != 0x1002))
503 data->smu_features[GNLD_PCC_LIMIT].supported = true;
504
505 /* Get the SN to turn into a Unique ID */
506 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
507 top32 = smum_get_argument(hwmgr);
508 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
509 bottom32 = smum_get_argument(hwmgr);
510
511 adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
512}
513
514#ifdef PPLIB_VEGA10_EVV_SUPPORT
515static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
516 phm_ppt_v1_voltage_lookup_table *lookup_table,
517 uint16_t virtual_voltage_id, int32_t *socclk)
518{
519 uint8_t entry_id;
520 uint8_t voltage_id;
521 struct phm_ppt_v2_information *table_info =
522 (struct phm_ppt_v2_information *)(hwmgr->pptable);
523
524 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
525 "Lookup table is empty",
526 return -EINVAL);
527
528 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
529 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
530 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
531 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
532 break;
533 }
534
535 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
536 "Can't find requested voltage id in vdd_dep_on_socclk table!",
537 return -EINVAL);
538
539 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
540
541 return 0;
542}
543
544#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
545/**
546* Get Leakage VDDC based on leakage ID.
547*
548* @param hwmgr the address of the powerplay hardware manager.
549* @return always 0.
550*/
551static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
552{
 553 struct vega10_hwmgr *data = hwmgr->backend;
554 uint16_t vv_id;
555 uint32_t vddc = 0;
556 uint16_t i, j;
557 uint32_t sclk = 0;
558 struct phm_ppt_v2_information *table_info =
559 (struct phm_ppt_v2_information *)hwmgr->pptable;
560 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
561 table_info->vdd_dep_on_socclk;
562 int result;
563
564 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
565 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
566
567 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
568 table_info->vddc_lookup_table, vv_id, &sclk)) {
 569 if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
570 for (j = 1; j < socclk_table->count; j++) {
571 if (socclk_table->entries[j].clk == sclk &&
572 socclk_table->entries[j].cks_enable == 0) {
573 sclk += 5000;
574 break;
575 }
576 }
577 }
578
579 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
580 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
581 "Error retrieving EVV voltage value!",
582 continue);
583
584
 585 /* need to make sure vddc is less than 2V, or else it could burn the ASIC. */
586 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
587 "Invalid VDDC value", result = -EINVAL;);
588
589 /* the voltage should not be zero nor equal to leakage ID */
590 if (vddc != 0 && vddc != vv_id) {
591 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
592 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
593 data->vddc_leakage.count++;
594 }
595 }
596 }
597
598 return 0;
599}
600
601/**
602 * Change virtual leakage voltage to actual value.
603 *
604 * @param hwmgr the address of the powerplay hardware manager.
 605 * @param voltage pointer to the voltage value to be patched
 606 * @param leakage_table pointer to the leakage table
607 */
608static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
609 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
610{
611 uint32_t index;
612
613 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
614 for (index = 0; index < leakage_table->count; index++) {
615 /* if this voltage matches a leakage voltage ID */
616 /* patch with actual leakage voltage */
617 if (leakage_table->leakage_id[index] == *voltage) {
618 *voltage = leakage_table->actual_voltage[index];
619 break;
620 }
621 }
622
623 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
 624 pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
625}
626
627/**
628* Patch voltage lookup table by EVV leakages.
629*
630* @param hwmgr the address of the powerplay hardware manager.
 631 * @param lookup_table pointer to the voltage lookup table
 632 * @param leakage_table pointer to the leakage table
633* @return always 0
634*/
635static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
636 phm_ppt_v1_voltage_lookup_table *lookup_table,
637 struct vega10_leakage_voltage *leakage_table)
638{
639 uint32_t i;
640
641 for (i = 0; i < lookup_table->count; i++)
642 vega10_patch_with_vdd_leakage(hwmgr,
643 &lookup_table->entries[i].us_vdd, leakage_table);
644
645 return 0;
646}
647
648static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
649 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
650 uint16_t *vddc)
651{
652 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
653
654 return 0;
655}
656#endif
657
658static int vega10_patch_voltage_dependency_tables_with_lookup_table(
659 struct pp_hwmgr *hwmgr)
660{
661 uint8_t entry_id, voltage_id;
662 unsigned i;
663 struct phm_ppt_v2_information *table_info =
664 (struct phm_ppt_v2_information *)(hwmgr->pptable);
665 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
666 table_info->mm_dep_table;
667 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
668 table_info->vdd_dep_on_mclk;
 669
670 for (i = 0; i < 6; i++) {
671 struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
672 switch (i) {
673 case 0: vdt = table_info->vdd_dep_on_socclk; break;
674 case 1: vdt = table_info->vdd_dep_on_sclk; break;
675 case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
676 case 3: vdt = table_info->vdd_dep_on_pixclk; break;
677 case 4: vdt = table_info->vdd_dep_on_dispclk; break;
678 case 5: vdt = table_info->vdd_dep_on_phyclk; break;
679 }
 680
681 for (entry_id = 0; entry_id < vdt->count; entry_id++) {
682 voltage_id = vdt->entries[entry_id].vddInd;
683 vdt->entries[entry_id].vddc =
684 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
685 }
686 }
687
9a5487ef
TSD
688 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
689 voltage_id = mm_table->entries[entry_id].vddcInd;
690 mm_table->entries[entry_id].vddc =
691 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
692 }
693
694 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
695 voltage_id = mclk_table->entries[entry_id].vddInd;
696 mclk_table->entries[entry_id].vddc =
697 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
698 voltage_id = mclk_table->entries[entry_id].vddciInd;
699 mclk_table->entries[entry_id].vddci =
700 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
701 voltage_id = mclk_table->entries[entry_id].mvddInd;
702 mclk_table->entries[entry_id].mvdd =
703 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
704 }
705
706
707 return 0;
708
709}
710
711static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
712 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
713{
714 uint32_t table_size, i, j;
715 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
716
717 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
718 "Lookup table is empty", return -EINVAL);
719
720 table_size = lookup_table->count;
721
722 /* Sorting voltages */
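 /* insertion sort: swap each entry downward until us_vdd is in ascending order */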
723 for (i = 0; i < table_size - 1; i++) {
724 for (j = i + 1; j > 0; j--) {
725 if (lookup_table->entries[j].us_vdd <
726 lookup_table->entries[j - 1].us_vdd) {
727 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
728 lookup_table->entries[j - 1] = lookup_table->entries[j];
729 lookup_table->entries[j] = tmp_voltage_lookup_record;
730 }
731 }
732 }
733
734 return 0;
735}
736
737static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
738{
739 int result = 0;
740 int tmp_result;
741 struct phm_ppt_v2_information *table_info =
742 (struct phm_ppt_v2_information *)(hwmgr->pptable);
743#ifdef PPLIB_VEGA10_EVV_SUPPORT
 744 struct vega10_hwmgr *data = hwmgr->backend;
745
746 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
747 table_info->vddc_lookup_table, &(data->vddc_leakage));
748 if (tmp_result)
749 result = tmp_result;
750
751 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
752 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
753 if (tmp_result)
754 result = tmp_result;
755#endif
756
757 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
758 if (tmp_result)
759 result = tmp_result;
760
761 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
762 if (tmp_result)
763 result = tmp_result;
764
765 return result;
766}
767
768static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
769{
770 struct phm_ppt_v2_information *table_info =
771 (struct phm_ppt_v2_information *)(hwmgr->pptable);
772 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
773 table_info->vdd_dep_on_socclk;
774 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
775 table_info->vdd_dep_on_mclk;
776
777 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
 778 "VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
 779 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
 780 "VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);
781
782 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
 783 "VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
 784 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
 785 "VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);
786
787 table_info->max_clock_voltage_on_ac.sclk =
788 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
789 table_info->max_clock_voltage_on_ac.mclk =
790 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
791 table_info->max_clock_voltage_on_ac.vddc =
792 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
793 table_info->max_clock_voltage_on_ac.vddci =
794 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
795
796 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
797 table_info->max_clock_voltage_on_ac.sclk;
798 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
799 table_info->max_clock_voltage_on_ac.mclk;
800 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
801 table_info->max_clock_voltage_on_ac.vddc;
802 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
803 table_info->max_clock_voltage_on_ac.vddci;
804
805 return 0;
806}
807
808static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
809{
810 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
811 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
812
813 kfree(hwmgr->backend);
814 hwmgr->backend = NULL;
815
816 return 0;
817}
818
819static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
820{
821 int result = 0;
822 struct vega10_hwmgr *data;
823 uint32_t config_telemetry = 0;
824 struct pp_atomfwctrl_voltage_table vol_table;
 825 struct amdgpu_device *adev = hwmgr->adev;
826
827 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
828 if (data == NULL)
829 return -ENOMEM;
830
831 hwmgr->backend = data;
832
833 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
834 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
835 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 836
 837 vega10_set_default_registry_data(hwmgr);
 838 data->disable_dpm_mask = 0xff;
839
840 /* need to set voltage control types before EVV patching */
841 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
842 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
843 data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
844
845 /* VDDCR_SOC */
846 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
847 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
848 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
849 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
850 &vol_table)) {
851 config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
852 (vol_table.telemetry_offset & 0xff);
853 data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
854 }
855 } else {
856 kfree(hwmgr->backend);
857 hwmgr->backend = NULL;
858 PP_ASSERT_WITH_CODE(false,
859 "VDDCR_SOC is not SVID2!",
860 return -1);
861 }
862
863 /* MVDDC */
864 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
865 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
866 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
867 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
868 &vol_table)) {
869 config_telemetry |=
870 ((vol_table.telemetry_slope << 24) & 0xff000000) |
871 ((vol_table.telemetry_offset << 16) & 0xff0000);
872 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
873 }
874 }
875
876 /* VDDCI_MEM */
 877 if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
878 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
879 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
880 data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
881 }
882
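 /*
  * config_telemetry, as assembled above, packs the SVI2 telemetry
  * calibration into one 32-bit word: bits [7:0] VDDC offset,
  * [15:8] VDDC slope, [23:16] MVDD offset, [31:24] MVDD slope.
  */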
883 data->config_telemetry = config_telemetry;
884
885 vega10_set_features_platform_caps(hwmgr);
886
887 vega10_init_dpm_defaults(hwmgr);
888
889#ifdef PPLIB_VEGA10_EVV_SUPPORT
890 /* Get leakage voltage based on leakage ID. */
891 PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
892 "Get EVV Voltage Failed. Abort Driver loading!",
893 return -1);
894#endif
895
896 /* Patch our voltage dependency table with actual leakage voltage
897 * We need to perform leakage translation before it's used by other functions
898 */
899 vega10_complete_dependency_tables(hwmgr);
900
901 /* Parse pptable data read from VBIOS */
902 vega10_set_private_data_based_on_pptable(hwmgr);
903
904 data->is_tlu_enabled = false;
905
906 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
907 VEGA10_MAX_HARDWARE_POWERLEVELS;
908 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
909 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
910
911 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
912 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
913 hwmgr->platform_descriptor.clockStep.engineClock = 500;
914 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
915
 916 data->total_active_cus = adev->gfx.cu_info.number;
917 /* Setup default Overdrive Fan control settings */
918 data->odn_fan_table.target_fan_speed =
919 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
920 data->odn_fan_table.target_temperature =
921 hwmgr->thermal_controller.
922 advanceFanControlParameters.ucTargetTemperature;
923 data->odn_fan_table.min_performance_clock =
924 hwmgr->thermal_controller.advanceFanControlParameters.
925 ulMinFanSCLKAcousticLimit;
926 data->odn_fan_table.min_fan_limit =
927 hwmgr->thermal_controller.
928 advanceFanControlParameters.usFanPWMMinLimit *
929 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
930
 931 data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
932 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
933 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
934 PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
935 "Mem Channel Index Exceeded maximum!",
936 return -EINVAL);
937
938 return result;
939}
940
941static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
942{
 943 struct vega10_hwmgr *data = hwmgr->backend;
944
945 data->low_sclk_interrupt_threshold = 0;
946
947 return 0;
948}
949
950static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
951{
 952 struct vega10_hwmgr *data = hwmgr->backend;
953 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
954
955 struct pp_atomfwctrl_voltage_table table;
956 uint8_t i, j;
957 uint32_t mask = 0;
958 uint32_t tmp;
959 int32_t ret = 0;
960
961 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
962 VOLTAGE_OBJ_GPIO_LUT, &table);
963
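 /*
  * Scan the GPIO mask for the first three set bits; each bit position is
  * an LED pin index, packed one per byte into 'mask' and split into
  * LedPin0/1/2 below.
  */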
964 if (!ret) {
965 tmp = table.mask_low;
966 for (i = 0, j = 0; i < 32; i++) {
967 if (tmp & 1) {
968 mask |= (uint32_t)(i << (8 * j));
969 if (++j >= 3)
970 break;
971 }
972 tmp >>= 1;
973 }
974 }
975
976 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
977 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
978 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
979 return 0;
980}
981
982static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
983{
984 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
985 "Failed to init sclk threshold!",
986 return -EINVAL);
987
988 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
989 "Failed to set up led dpm config!",
990 return -EINVAL);
991
992 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
993
994 return 0;
995}
996
997/**
998* Remove repeated voltage values and create table with unique values.
999*
1000* @param hwmgr the address of the powerplay hardware manager.
 1001* @param vol_table the pointer to the voltage table to de-duplicate in place
 1002* @return 0 on success
1003*/
1004
1005static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
1006 struct pp_atomfwctrl_voltage_table *vol_table)
1007{
1008 uint32_t i, j;
1009 uint16_t vvalue;
1010 bool found = false;
1011 struct pp_atomfwctrl_voltage_table *table;
1012
1013 PP_ASSERT_WITH_CODE(vol_table,
1014 "Voltage Table empty.", return -EINVAL);
1015 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
1016 GFP_KERNEL);
1017
1018 if (!table)
1019 return -ENOMEM;
1020
1021 table->mask_low = vol_table->mask_low;
1022 table->phase_delay = vol_table->phase_delay;
1023
1024 for (i = 0; i < vol_table->count; i++) {
1025 vvalue = vol_table->entries[i].value;
1026 found = false;
1027
1028 for (j = 0; j < table->count; j++) {
1029 if (vvalue == table->entries[j].value) {
1030 found = true;
1031 break;
1032 }
1033 }
1034
1035 if (!found) {
1036 table->entries[table->count].value = vvalue;
1037 table->entries[table->count].smio_low =
1038 vol_table->entries[i].smio_low;
1039 table->count++;
1040 }
1041 }
1042
1043 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
1044 kfree(table);
1045
1046 return 0;
1047}
1048
1049static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
1050 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1051 struct pp_atomfwctrl_voltage_table *vol_table)
1052{
1053 int i;
1054
1055 PP_ASSERT_WITH_CODE(dep_table->count,
1056 "Voltage Dependency Table empty.",
1057 return -EINVAL);
1058
1059 vol_table->mask_low = 0;
1060 vol_table->phase_delay = 0;
1061 vol_table->count = dep_table->count;
1062
1063 for (i = 0; i < vol_table->count; i++) {
1064 vol_table->entries[i].value = dep_table->entries[i].mvdd;
1065 vol_table->entries[i].smio_low = 0;
1066 }
1067
1068 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
1069 vol_table),
1070 "Failed to trim MVDD Table!",
1071 return -1);
1072
1073 return 0;
1074}
1075
1076static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
1077 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1078 struct pp_atomfwctrl_voltage_table *vol_table)
1079{
1080 uint32_t i;
1081
1082 PP_ASSERT_WITH_CODE(dep_table->count,
1083 "Voltage Dependency Table empty.",
1084 return -EINVAL);
1085
1086 vol_table->mask_low = 0;
1087 vol_table->phase_delay = 0;
1088 vol_table->count = dep_table->count;
1089
1090 for (i = 0; i < dep_table->count; i++) {
1091 vol_table->entries[i].value = dep_table->entries[i].vddci;
1092 vol_table->entries[i].smio_low = 0;
1093 }
1094
1095 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1096 "Failed to trim VDDCI table.",
1097 return -1);
1098
1099 return 0;
1100}
1101
1102static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1103 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1104 struct pp_atomfwctrl_voltage_table *vol_table)
1105{
1106 int i;
1107
1108 PP_ASSERT_WITH_CODE(dep_table->count,
1109 "Voltage Dependency Table empty.",
1110 return -EINVAL);
1111
1112 vol_table->mask_low = 0;
1113 vol_table->phase_delay = 0;
1114 vol_table->count = dep_table->count;
1115
1116 for (i = 0; i < vol_table->count; i++) {
1117 vol_table->entries[i].value = dep_table->entries[i].vddc;
1118 vol_table->entries[i].smio_low = 0;
1119 }
1120
1121 return 0;
1122}
1123
1124/* ---- Voltage Tables ----
1125 * If the voltage table would be bigger than
1126 * what will fit into the state table on
1127 * the SMC keep only the higher entries.
1128 */
1129static void vega10_trim_voltage_table_to_fit_state_table(
1130 struct pp_hwmgr *hwmgr,
1131 uint32_t max_vol_steps,
1132 struct pp_atomfwctrl_voltage_table *vol_table)
1133{
1134 unsigned int i, diff;
1135
1136 if (vol_table->count <= max_vol_steps)
1137 return;
1138
1139 diff = vol_table->count - max_vol_steps;
1140
1141 for (i = 0; i < max_vol_steps; i++)
1142 vol_table->entries[i] = vol_table->entries[i + diff];
1143
1144 vol_table->count = max_vol_steps;
1145}
1146
1147/**
1148* Create Voltage Tables.
1149*
1150* @param hwmgr the address of the powerplay hardware manager.
1151* @return always 0
1152*/
1153static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1154{
 1155 struct vega10_hwmgr *data = hwmgr->backend;
1156 struct phm_ppt_v2_information *table_info =
1157 (struct phm_ppt_v2_information *)hwmgr->pptable;
1158 int result;
1159
1160 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1161 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1162 result = vega10_get_mvdd_voltage_table(hwmgr,
1163 table_info->vdd_dep_on_mclk,
1164 &(data->mvdd_voltage_table));
1165 PP_ASSERT_WITH_CODE(!result,
1166 "Failed to retrieve MVDDC table!",
1167 return result);
1168 }
1169
1170 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1171 result = vega10_get_vddci_voltage_table(hwmgr,
1172 table_info->vdd_dep_on_mclk,
1173 &(data->vddci_voltage_table));
1174 PP_ASSERT_WITH_CODE(!result,
1175 "Failed to retrieve VDDCI_MEM table!",
1176 return result);
1177 }
1178
1179 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1180 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1181 result = vega10_get_vdd_voltage_table(hwmgr,
1182 table_info->vdd_dep_on_sclk,
1183 &(data->vddc_voltage_table));
1184 PP_ASSERT_WITH_CODE(!result,
1185 "Failed to retrieve VDDCR_SOC table!",
1186 return result);
1187 }
1188
1189 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1190 "Too many voltage values for VDDC. Trimming to fit state table.",
1191 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1192 16, &(data->vddc_voltage_table)));
1193
1194 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1195 "Too many voltage values for VDDCI. Trimming to fit state table.",
1196 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1197 16, &(data->vddci_voltage_table)));
1198
1199 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1200 "Too many voltage values for MVDD. Trimming to fit state table.",
1201 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1202 16, &(data->mvdd_voltage_table)));
1203
1204
1205 return 0;
1206}
1207
1208/*
1209 * @fn vega10_init_dpm_state
1210 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1211 *
 1212 * @param dpm_state - the address of the DPM Table to initialize.
1213 * @return None.
1214 */
1215static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1216{
1217 dpm_state->soft_min_level = 0xff;
1218 dpm_state->soft_max_level = 0xff;
1219 dpm_state->hard_min_level = 0xff;
1220 dpm_state->hard_max_level = 0xff;
1221}
1222
1223static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1224 struct vega10_single_dpm_table *dpm_table,
1225 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1226{
1227 int i;
1228
1229 dpm_table->count = 0;
1230
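 /*
  * Copy the dependency entries, skipping any whose clock is lower than
  * the previously added level, so the DPM table stays monotonically
  * non-decreasing.
  */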
 1231 for (i = 0; i < dep_table->count; i++) {
 1232 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1233 dep_table->entries[i].clk) {
1234 dpm_table->dpm_levels[dpm_table->count].value =
1235 dep_table->entries[i].clk;
1236 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1237 dpm_table->count++;
1238 }
1239 }
1240}
1241static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1242{
 1243 struct vega10_hwmgr *data = hwmgr->backend;
1244 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1245 struct phm_ppt_v2_information *table_info =
1246 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1247 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1248 table_info->pcie_table;
1249 uint32_t i;
1250
1251 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1252 "Incorrect number of PCIE States from VBIOS!",
1253 return -1);
1254
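 /*
  * For every link level, a non-zero registry override for gen speed,
  * lane width or LCLK takes precedence over the corresponding VBIOS
  * PCIe table entry.
  */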
 1255 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1256 if (data->registry_data.pcieSpeedOverride)
1257 pcie_table->pcie_gen[i] =
1258 data->registry_data.pcieSpeedOverride;
1259 else
1260 pcie_table->pcie_gen[i] =
1261 bios_pcie_table->entries[i].gen_speed;
1262
1263 if (data->registry_data.pcieLaneOverride)
1264 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1265 data->registry_data.pcieLaneOverride);
 1266 else
1267 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1268 bios_pcie_table->entries[i].lane_width);
1269 if (data->registry_data.pcieClockOverride)
1270 pcie_table->lclk[i] =
1271 data->registry_data.pcieClockOverride;
1272 else
1273 pcie_table->lclk[i] =
1274 bios_pcie_table->entries[i].pcie_sclk;
1275 }
1276
 1277 pcie_table->count = NUM_LINK_LEVELS;
1278
1279 return 0;
1280}
1281
1282/*
1283 * This function is to initialize all DPM state tables
1284 * for SMU based on the dependency table.
1285 * Dynamic state patching function will then trim these
1286 * state tables to the allowed range based
1287 * on the power policy or external client requests,
1288 * such as UVD request, etc.
1289 */
1290static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1291{
 1292 struct vega10_hwmgr *data = hwmgr->backend;
1293 struct phm_ppt_v2_information *table_info =
1294 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1295 struct vega10_single_dpm_table *dpm_table;
1296 uint32_t i;
1297
1298 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1299 table_info->vdd_dep_on_socclk;
1300 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1301 table_info->vdd_dep_on_sclk;
1302 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1303 table_info->vdd_dep_on_mclk;
1304 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1305 table_info->mm_dep_table;
1306 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1307 table_info->vdd_dep_on_dcefclk;
1308 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1309 table_info->vdd_dep_on_pixclk;
1310 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1311 table_info->vdd_dep_on_dispclk;
1312 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1313 table_info->vdd_dep_on_phyclk;
1314
1315 PP_ASSERT_WITH_CODE(dep_soc_table,
1316 "SOCCLK dependency table is missing. This table is mandatory",
1317 return -EINVAL);
1318 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1319 "SOCCLK dependency table is empty. This table is mandatory",
1320 return -EINVAL);
1321
1322 PP_ASSERT_WITH_CODE(dep_gfx_table,
1323 "GFXCLK dependency table is missing. This table is mandatory",
1324 return -EINVAL);
1325 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1326 "GFXCLK dependency table is empty. This table is mandatory",
1327 return -EINVAL);
1328
1329 PP_ASSERT_WITH_CODE(dep_mclk_table,
1330 "MCLK dependency table is missing. This table is mandatory",
1331 return -EINVAL);
1332 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
 1333 "MCLK dependency table is empty. This table is mandatory",
1334 return -EINVAL);
1335
 1336 /* Initialize Sclk DPM table based on allowed Sclk values */
1337 dpm_table = &(data->dpm_table.soc_table);
1338 vega10_setup_default_single_dpm_table(hwmgr,
1339 dpm_table,
1340 dep_soc_table);
1341
1342 vega10_init_dpm_state(&(dpm_table->dpm_state));
1343
1344 dpm_table = &(data->dpm_table.gfx_table);
1345 vega10_setup_default_single_dpm_table(hwmgr,
1346 dpm_table,
1347 dep_gfx_table);
1348 if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
1349 hwmgr->platform_descriptor.overdriveLimit.engineClock =
1350 dpm_table->dpm_levels[dpm_table->count-1].value;
1351 vega10_init_dpm_state(&(dpm_table->dpm_state));
1352
 1353 /* Initialize Mclk DPM table based on allowed Mclk values */
1354 data->dpm_table.mem_table.count = 0;
1355 dpm_table = &(data->dpm_table.mem_table);
1356 vega10_setup_default_single_dpm_table(hwmgr,
1357 dpm_table,
1358 dep_mclk_table);
1359 if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
1360 hwmgr->platform_descriptor.overdriveLimit.memoryClock =
1361 dpm_table->dpm_levels[dpm_table->count-1].value;
1362 vega10_init_dpm_state(&(dpm_table->dpm_state));
1363
1364 data->dpm_table.eclk_table.count = 0;
1365 dpm_table = &(data->dpm_table.eclk_table);
1366 for (i = 0; i < dep_mm_table->count; i++) {
1367 if (i == 0 || dpm_table->dpm_levels
 1368 [dpm_table->count - 1].value <=
1369 dep_mm_table->entries[i].eclk) {
1370 dpm_table->dpm_levels[dpm_table->count].value =
1371 dep_mm_table->entries[i].eclk;
1372 dpm_table->dpm_levels[dpm_table->count].enabled =
1373 (i == 0) ? true : false;
1374 dpm_table->count++;
1375 }
1376 }
1377 vega10_init_dpm_state(&(dpm_table->dpm_state));
1378
1379 data->dpm_table.vclk_table.count = 0;
1380 data->dpm_table.dclk_table.count = 0;
1381 dpm_table = &(data->dpm_table.vclk_table);
1382 for (i = 0; i < dep_mm_table->count; i++) {
1383 if (i == 0 || dpm_table->dpm_levels
 1384 [dpm_table->count - 1].value <=
1385 dep_mm_table->entries[i].vclk) {
1386 dpm_table->dpm_levels[dpm_table->count].value =
1387 dep_mm_table->entries[i].vclk;
1388 dpm_table->dpm_levels[dpm_table->count].enabled =
1389 (i == 0) ? true : false;
1390 dpm_table->count++;
1391 }
1392 }
1393 vega10_init_dpm_state(&(dpm_table->dpm_state));
1394
1395 dpm_table = &(data->dpm_table.dclk_table);
1396 for (i = 0; i < dep_mm_table->count; i++) {
1397 if (i == 0 || dpm_table->dpm_levels
 1398 [dpm_table->count - 1].value <=
1399 dep_mm_table->entries[i].dclk) {
1400 dpm_table->dpm_levels[dpm_table->count].value =
1401 dep_mm_table->entries[i].dclk;
1402 dpm_table->dpm_levels[dpm_table->count].enabled =
1403 (i == 0) ? true : false;
1404 dpm_table->count++;
1405 }
1406 }
1407 vega10_init_dpm_state(&(dpm_table->dpm_state));
1408
1409 /* Assume there is no headless Vega10 for now */
1410 dpm_table = &(data->dpm_table.dcef_table);
1411 vega10_setup_default_single_dpm_table(hwmgr,
1412 dpm_table,
1413 dep_dcef_table);
1414
1415 vega10_init_dpm_state(&(dpm_table->dpm_state));
1416
1417 dpm_table = &(data->dpm_table.pixel_table);
1418 vega10_setup_default_single_dpm_table(hwmgr,
1419 dpm_table,
1420 dep_pix_table);
1421
1422 vega10_init_dpm_state(&(dpm_table->dpm_state));
1423
1424 dpm_table = &(data->dpm_table.display_table);
1425 vega10_setup_default_single_dpm_table(hwmgr,
1426 dpm_table,
1427 dep_disp_table);
1428
1429 vega10_init_dpm_state(&(dpm_table->dpm_state));
1430
1431 dpm_table = &(data->dpm_table.phy_table);
1432 vega10_setup_default_single_dpm_table(hwmgr,
1433 dpm_table,
1434 dep_phy_table);
1435
1436 vega10_init_dpm_state(&(dpm_table->dpm_state));
1437
1438 vega10_setup_default_pcie_table(hwmgr);
1439
1440 /* Zero out the saved copy of the CUSTOM profile
1441 * This will be checked when trying to set the profile
1442 * and will require that new values be passed in
1443 */
1444 data->custom_profile_mode[0] = 0;
1445 data->custom_profile_mode[1] = 0;
1446 data->custom_profile_mode[2] = 0;
1447 data->custom_profile_mode[3] = 0;
1448
1449 /* save a copy of the default DPM table */
1450 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1451 sizeof(struct vega10_dpm_table));
1452
1453 return 0;
1454}
1455
1456/*
1457 * @fn vega10_populate_ulv_state
 1458 * @brief Function to provide parameters for the Ultra Low Voltage (ULV) state to SMC.
1459 *
1460 * @param hwmgr - the address of the hardware manager.
1461 * @return Always 0.
1462 */
1463static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1464{
 1465 struct vega10_hwmgr *data = hwmgr->backend;
1466 struct phm_ppt_v2_information *table_info =
1467 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1468
1469 data->smc_state_table.pp_table.UlvOffsetVid =
 1470 (uint8_t)table_info->us_ulv_voltage_offset;
1471
1472 data->smc_state_table.pp_table.UlvSmnclkDid =
1473 (uint8_t)(table_info->us_ulv_smnclk_did);
1474 data->smc_state_table.pp_table.UlvMp1clkDid =
1475 (uint8_t)(table_info->us_ulv_mp1clk_did);
1476 data->smc_state_table.pp_table.UlvGfxclkBypass =
1477 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1478 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1479 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1480 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1481 (uint8_t)(data->vddc_voltage_table.psi1_enable);
1482
1483 return 0;
1484}
1485
1486static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1487 uint32_t lclock, uint8_t *curr_lclk_did)
1488{
1489 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1490
1491 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1492 hwmgr,
1493 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1494 lclock, &dividers),
1495 "Failed to get LCLK clock settings from VBIOS!",
1496 return -1);
1497
1498 *curr_lclk_did = dividers.ulDid;
1499
1500 return 0;
1501}
1502
1503static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1504{
1505 int result = -1;
 1506 struct vega10_hwmgr *data = hwmgr->backend;
1507 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1508 struct vega10_pcie_table *pcie_table =
1509 &(data->dpm_table.pcie_table);
1510 uint32_t i, j;
1511
1512 for (i = 0; i < pcie_table->count; i++) {
1513 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1514 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1515
1516 result = vega10_populate_single_lclk_level(hwmgr,
1517 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1518 if (result) {
1519 pr_info("Populate LClock Level %d Failed!\n", i);
1520 return result;
1521 }
1522 }
1523
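 /*
  * Fewer PCIe levels were provided than the SMC expects: replicate the
  * last valid gen/lane/LCLK entry into the remaining NUM_LINK_LEVELS
  * slots.
  */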
1524 j = i - 1;
1525 while (i < NUM_LINK_LEVELS) {
1526 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1527 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1528
1529 result = vega10_populate_single_lclk_level(hwmgr,
1530 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1531 if (result) {
1532 pr_info("Populate LClock Level %d Failed!\n", i);
1533 return result;
1534 }
1535 i++;
1536 }
1537
1538 return result;
1539}
1540
1541/**
1542* Populates single SMC GFXSCLK structure using the provided engine clock
1543*
1544* @param hwmgr the address of the hardware manager
1545* @param gfx_clock the GFX clock to use to populate the structure.
1546* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1547*/
1548
1549static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1550 uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
1551 uint32_t *acg_freq)
1552{
1553 struct phm_ppt_v2_information *table_info =
1554 (struct phm_ppt_v2_information *)(hwmgr->pptable);
c5a44849 1555 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
690dc626 1556 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 1557 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1558 uint32_t gfx_max_clock =
1559 hwmgr->platform_descriptor.overdriveLimit.engineClock;
1560 uint32_t i = 0;
f83a9991 1561
c5a44849 1562 if (hwmgr->od_enabled)
f83a9991 1563 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1564 &(data->odn_dpm_table.vdd_dep_on_sclk);
1565 else
1566 dep_on_sclk = table_info->vdd_dep_on_sclk;
1567
1568 PP_ASSERT_WITH_CODE(dep_on_sclk,
1569 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
1570 return -EINVAL);
1571
1572 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
1573 gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
1574 else {
1575 for (i = 0; i < dep_on_sclk->count; i++) {
1576 if (dep_on_sclk->entries[i].clk == gfx_clock)
1577 break;
1578 }
1579 PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1580 "Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1581 return -EINVAL);
1582 }
1583
1584 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1585 COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1586 gfx_clock, &dividers),
1587 "Failed to get GFX Clock settings from VBIOS!",
1588 return -EINVAL);
1589
1590 /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1591 current_gfxclk_level->FbMult =
1592 cpu_to_le32(dividers.ulPll_fb_mult);
1593 /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
93480f89 1594 current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
1595 current_gfxclk_level->SsFbMult =
1596 cpu_to_le32(dividers.ulPll_ss_fbsmult);
1597 current_gfxclk_level->SsSlewFrac =
1598 cpu_to_le16(dividers.usPll_ss_slew_frac);
1599 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1600
1601	*acg_freq = gfx_clock / 100; /* gfx_clock is in 10 kHz units; convert to MHz */
1602
1603 return 0;
1604}
1605
1606/**
1607 * @brief Populates single SMC SOCCLK structure using the provided clock.
1608 *
1609 * @param hwmgr - the address of the hardware manager.
1610 * @param soc_clock - the SOC clock to use to populate the structure.
1611 * @param current_soc_did - location in PPTable for the SMC SOCCLK divider ID.
1612 * @return 0 on success.
1613 */
1614static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1615 uint32_t soc_clock, uint8_t *current_soc_did,
1616 uint8_t *current_vol_index)
1617{
c5a44849 1618 struct vega10_hwmgr *data = hwmgr->backend;
1619 struct phm_ppt_v2_information *table_info =
1620 (struct phm_ppt_v2_information *)(hwmgr->pptable);
c5a44849 1621 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
1622 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1623 uint32_t i;
1624
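	/* With overdrive enabled the requested clock need not match an ODN
	 * entry exactly, so take the first entry at or above it; otherwise an
	 * exact match in the stock dependency table is required.
	 */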
1625 if (hwmgr->od_enabled) {
1626 dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1627 &data->odn_dpm_table.vdd_dep_on_socclk;
1628 for (i = 0; i < dep_on_soc->count; i++) {
1629 if (dep_on_soc->entries[i].clk >= soc_clock)
1630 break;
1631 }
1632 } else {
1633 dep_on_soc = table_info->vdd_dep_on_socclk;
1634 for (i = 0; i < dep_on_soc->count; i++) {
1635 if (dep_on_soc->entries[i].clk == soc_clock)
1636 break;
1637 }
f83a9991 1638 }
c5a44849 1639
1640 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1641 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1642 return -EINVAL);
c5a44849 1643
1644 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1645 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1646 soc_clock, &dividers),
1647 "Failed to get SOC Clock settings from VBIOS!",
1648 return -EINVAL);
1649
1650 *current_soc_did = (uint8_t)dividers.ulDid;
1651 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1652 return 0;
1653}
1654
1655/**
1656* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1657*
1658* @param hwmgr the address of the hardware manager
1659*/
1660static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1661{
690dc626 1662 struct vega10_hwmgr *data = hwmgr->backend;
1663 struct phm_ppt_v2_information *table_info =
1664 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1665 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1666 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1667 int result = 0;
1668 uint32_t i, j;
1669
1670 for (i = 0; i < dpm_table->count; i++) {
1671 result = vega10_populate_single_gfx_level(hwmgr,
1672 dpm_table->dpm_levels[i].value,
1673 &(pp_table->GfxclkLevel[i]),
1674 &(pp_table->AcgFreqTable[i]));
1675 if (result)
1676 return result;
1677 }
1678
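	/* Fill the remaining GFXCLK DPM slots with the last real level so the
	 * SMC table is fully populated.
	 */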
1679 j = i - 1;
1680 while (i < NUM_GFXCLK_DPM_LEVELS) {
1681 result = vega10_populate_single_gfx_level(hwmgr,
1682 dpm_table->dpm_levels[j].value,
1683 &(pp_table->GfxclkLevel[i]),
1684 &(pp_table->AcgFreqTable[i]));
1685 if (result)
1686 return result;
1687 i++;
1688 }
1689
1690 pp_table->GfxclkSlewRate =
1691 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1692
1693 dpm_table = &(data->dpm_table.soc_table);
1694 for (i = 0; i < dpm_table->count; i++) {
1695 result = vega10_populate_single_soc_level(hwmgr,
1696 dpm_table->dpm_levels[i].value,
1697 &(pp_table->SocclkDid[i]),
1698 &(pp_table->SocDpmVoltageIndex[i]));
1699 if (result)
1700 return result;
1701 }
1702
1703 j = i - 1;
1704 while (i < NUM_SOCCLK_DPM_LEVELS) {
1705 result = vega10_populate_single_soc_level(hwmgr,
1706 dpm_table->dpm_levels[j].value,
1707 &(pp_table->SocclkDid[i]),
1708 &(pp_table->SocDpmVoltageIndex[i]));
1709 if (result)
1710 return result;
1711 i++;
1712 }
1713
1714 return result;
1715}
1716
1717static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
1718{
1719 struct vega10_hwmgr *data = hwmgr->backend;
1720 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1721 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
1722 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
1723
1724 uint8_t soc_vid = 0;
1725 uint32_t i, max_vddc_level;
1726
1727 if (hwmgr->od_enabled)
1728 vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
1729 else
1730 vddc_lookup_table = table_info->vddc_lookup_table;
1731
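	/* Convert each VDDC lookup entry into an SVI2 VID code; leftover
	 * SocVid[] slots are padded with the last value below.
	 */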
1732 max_vddc_level = vddc_lookup_table->count;
1733 for (i = 0; i < max_vddc_level; i++) {
1734 soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
1735 pp_table->SocVid[i] = soc_vid;
1736 }
1737 while (i < MAX_REGULAR_DPM_NUMBER) {
1738 pp_table->SocVid[i] = soc_vid;
1739 i++;
1740 }
1741}
1742
1743/**
1744 * @brief Populates a single SMC memory (UCLK) level structure using the provided clock.
1745 *
1746 * @param hwmgr - the address of the hardware manager.
1747 * @param mem_clock - the memory clock to use to populate the structure.
1748 * @return 0 on success.
1749 */
1750static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1751 uint32_t mem_clock, uint8_t *current_mem_vid,
1752 PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1753{
690dc626 1754 struct vega10_hwmgr *data = hwmgr->backend;
1755 struct phm_ppt_v2_information *table_info =
1756 (struct phm_ppt_v2_information *)(hwmgr->pptable);
c5a44849 1757 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
f83a9991 1758 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1759 uint32_t mem_max_clock =
1760 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
1761 uint32_t i = 0;
f83a9991 1762
c5a44849 1763 if (hwmgr->od_enabled)
f83a9991 1764 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1765 &data->odn_dpm_table.vdd_dep_on_mclk;
1766 else
1767 dep_on_mclk = table_info->vdd_dep_on_mclk;
1768
1769 PP_ASSERT_WITH_CODE(dep_on_mclk,
1770 "Invalid SOC_VDD-UCLK Dependency Table!",
1771 return -EINVAL);
1772
c5a44849 1773 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
dd4e2237 1774 mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
c5a44849 1775 } else {
1776 for (i = 0; i < dep_on_mclk->count; i++) {
1777 if (dep_on_mclk->entries[i].clk == mem_clock)
1778 break;
1779 }
1780 PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1781 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1782 return -EINVAL);
1783 }
1784
1785 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1786 hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
1787 "Failed to get UCLK settings from VBIOS!",
1788 return -1);
1789
1790 *current_mem_vid =
1791 (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1792 *current_mem_soc_vind =
1793 (uint8_t)(dep_on_mclk->entries[i].vddInd);
1794 current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1795 current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1796
1797 PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1798 "Invalid Divider ID!",
1799 return -EINVAL);
1800
1801 return 0;
1802}
1803
1804/**
1805 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1806 *
1807 * @param hwmgr - the address of the hardware manager.
1808 * @return 0 on success.
1809 */
1810static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1811{
690dc626 1812 struct vega10_hwmgr *data = hwmgr->backend;
1813 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1814 struct vega10_single_dpm_table *dpm_table =
1815 &(data->dpm_table.mem_table);
1816 int result = 0;
451cc55d 1817 uint32_t i, j;
1818
1819 for (i = 0; i < dpm_table->count; i++) {
1820 result = vega10_populate_single_memory_level(hwmgr,
1821 dpm_table->dpm_levels[i].value,
1822 &(pp_table->MemVid[i]),
1823 &(pp_table->UclkLevel[i]),
1824 &(pp_table->MemSocVoltageIndex[i]));
1825 if (result)
1826 return result;
1827 }
1828
1829 j = i - 1;
1830 while (i < NUM_UCLK_DPM_LEVELS) {
1831 result = vega10_populate_single_memory_level(hwmgr,
1832 dpm_table->dpm_levels[j].value,
1833 &(pp_table->MemVid[i]),
1834 &(pp_table->UclkLevel[i]),
1835 &(pp_table->MemSocVoltageIndex[i]));
1836 if (result)
1837 return result;
1838 i++;
1839 }
1840
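	/* channel_number[] translates the encoded memory-channel field in
	 * mem_channels into the actual HBM channel count; the reported bus
	 * width is HBM_MEMORY_CHANNEL_WIDTH (128) bits per channel.
	 */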
451cc55d 1841 pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
f83a9991 1842 pp_table->MemoryChannelWidth =
1843 (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
1844 channel_number[data->mem_channels]);
1845
1846 pp_table->LowestUclkReservedForUlv =
1847 (uint8_t)(data->lowest_uclk_reserved_for_ulv);
1848
1849 return result;
1850}
1851
1852static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1853 DSPCLK_e disp_clock)
1854{
690dc626 1855 struct vega10_hwmgr *data = hwmgr->backend;
1856 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1857 struct phm_ppt_v2_information *table_info =
1858 (struct phm_ppt_v2_information *)
1859 (hwmgr->pptable);
1860 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1861 uint32_t i;
1862 uint16_t clk = 0, vddc = 0;
1863 uint8_t vid = 0;
1864
1865 switch (disp_clock) {
1866 case DSPCLK_DCEFCLK:
1867 dep_table = table_info->vdd_dep_on_dcefclk;
1868 break;
1869 case DSPCLK_DISPCLK:
1870 dep_table = table_info->vdd_dep_on_dispclk;
1871 break;
1872 case DSPCLK_PIXCLK:
1873 dep_table = table_info->vdd_dep_on_pixclk;
1874 break;
1875 case DSPCLK_PHYCLK:
1876 dep_table = table_info->vdd_dep_on_phyclk;
1877 break;
1878 default:
1879 return -1;
1880 }
1881
1882 PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1883 "Number Of Entries Exceeded maximum!",
1884 return -1);
1885
1886 for (i = 0; i < dep_table->count; i++) {
1887 clk = (uint16_t)(dep_table->entries[i].clk / 100);
1888 vddc = table_info->vddc_lookup_table->
1889 entries[dep_table->entries[i].vddInd].us_vdd;
1890 vid = (uint8_t)convert_to_vid(vddc);
1891 pp_table->DisplayClockTable[disp_clock][i].Freq =
1892 cpu_to_le16(clk);
1893 pp_table->DisplayClockTable[disp_clock][i].Vid =
1894 cpu_to_le16(vid);
1895 }
1896
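	/* Pad the remaining display clock slots with the last frequency/VID pair. */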
1897 while (i < NUM_DSPCLK_LEVELS) {
1898 pp_table->DisplayClockTable[disp_clock][i].Freq =
1899 cpu_to_le16(clk);
1900 pp_table->DisplayClockTable[disp_clock][i].Vid =
1901 cpu_to_le16(vid);
1902 i++;
1903 }
1904
1905 return 0;
1906}
1907
1908static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1909{
1910 uint32_t i;
1911
1912 for (i = 0; i < DSPCLK_COUNT; i++) {
1913 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1914 "Failed to populate Clock in DisplayClockTable!",
1915 return -1);
1916 }
1917
1918 return 0;
1919}
1920
1921static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1922 uint32_t eclock, uint8_t *current_eclk_did,
1923 uint8_t *current_soc_vol)
1924{
1925 struct phm_ppt_v2_information *table_info =
1926 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1927 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1928 table_info->mm_dep_table;
1929 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1930 uint32_t i;
1931
1932 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1933 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1934 eclock, &dividers),
1935 "Failed to get ECLK clock settings from VBIOS!",
1936 return -1);
1937
1938 *current_eclk_did = (uint8_t)dividers.ulDid;
1939
1940 for (i = 0; i < dep_table->count; i++) {
1941 if (dep_table->entries[i].eclk == eclock)
1942 *current_soc_vol = dep_table->entries[i].vddcInd;
1943 }
1944
1945 return 0;
1946}
1947
1948static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1949{
690dc626 1950 struct vega10_hwmgr *data = hwmgr->backend;
1951 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1952 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1953 int result = -EINVAL;
1954 uint32_t i, j;
1955
1956 for (i = 0; i < dpm_table->count; i++) {
1957 result = vega10_populate_single_eclock_level(hwmgr,
1958 dpm_table->dpm_levels[i].value,
1959 &(pp_table->EclkDid[i]),
1960 &(pp_table->VceDpmVoltageIndex[i]));
1961 if (result)
1962 return result;
1963 }
1964
1965 j = i - 1;
1966 while (i < NUM_VCE_DPM_LEVELS) {
1967 result = vega10_populate_single_eclock_level(hwmgr,
1968 dpm_table->dpm_levels[j].value,
1969 &(pp_table->EclkDid[i]),
1970 &(pp_table->VceDpmVoltageIndex[i]));
1971 if (result)
1972 return result;
1973 i++;
1974 }
1975
1976 return result;
1977}
1978
1979static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1980 uint32_t vclock, uint8_t *current_vclk_did)
1981{
1982 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1983
1984 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1985 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1986 vclock, &dividers),
1987 "Failed to get VCLK clock settings from VBIOS!",
1988 return -EINVAL);
1989
1990 *current_vclk_did = (uint8_t)dividers.ulDid;
1991
1992 return 0;
1993}
1994
1995static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1996 uint32_t dclock, uint8_t *current_dclk_did)
1997{
1998 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1999
2000 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
2001 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2002 dclock, &dividers),
2003 "Failed to get DCLK clock settings from VBIOS!",
2004 return -EINVAL);
2005
2006 *current_dclk_did = (uint8_t)dividers.ulDid;
2007
2008 return 0;
2009}
2010
2011static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
2012{
690dc626 2013 struct vega10_hwmgr *data = hwmgr->backend;
2014 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2015 struct vega10_single_dpm_table *vclk_dpm_table =
2016 &(data->dpm_table.vclk_table);
2017 struct vega10_single_dpm_table *dclk_dpm_table =
2018 &(data->dpm_table.dclk_table);
2019 struct phm_ppt_v2_information *table_info =
2020 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2021 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
2022 table_info->mm_dep_table;
2023 int result = -EINVAL;
2024 uint32_t i, j;
2025
2026 for (i = 0; i < vclk_dpm_table->count; i++) {
2027 result = vega10_populate_single_vclock_level(hwmgr,
2028 vclk_dpm_table->dpm_levels[i].value,
2029 &(pp_table->VclkDid[i]));
2030 if (result)
2031 return result;
2032 }
2033
2034 j = i - 1;
2035 while (i < NUM_UVD_DPM_LEVELS) {
2036 result = vega10_populate_single_vclock_level(hwmgr,
2037 vclk_dpm_table->dpm_levels[j].value,
2038 &(pp_table->VclkDid[i]));
2039 if (result)
2040 return result;
2041 i++;
2042 }
2043
2044 for (i = 0; i < dclk_dpm_table->count; i++) {
2045 result = vega10_populate_single_dclock_level(hwmgr,
2046 dclk_dpm_table->dpm_levels[i].value,
2047 &(pp_table->DclkDid[i]));
2048 if (result)
2049 return result;
2050 }
2051
2052 j = i - 1;
2053 while (i < NUM_UVD_DPM_LEVELS) {
2054 result = vega10_populate_single_dclock_level(hwmgr,
2055 dclk_dpm_table->dpm_levels[j].value,
2056 &(pp_table->DclkDid[i]));
2057 if (result)
2058 return result;
2059 i++;
2060 }
2061
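	/* The MM dependency table is expected to line up 1:1 with the
	 * VCLK/DCLK DPM levels; fail rather than guess a voltage index.
	 */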
2062 for (i = 0; i < dep_table->count; i++) {
2063 if (dep_table->entries[i].vclk ==
2064 vclk_dpm_table->dpm_levels[i].value &&
2065 dep_table->entries[i].dclk ==
2066 dclk_dpm_table->dpm_levels[i].value)
2067 pp_table->UvdDpmVoltageIndex[i] =
2068 dep_table->entries[i].vddcInd;
2069 else
2070 return -1;
2071 }
2072
2073 j = i - 1;
2074 while (i < NUM_UVD_DPM_LEVELS) {
2075 pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2076 i++;
2077 }
2078
2079 return 0;
2080}
2081
2082static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2083{
690dc626 2084 struct vega10_hwmgr *data = hwmgr->backend;
2085 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2086 struct phm_ppt_v2_information *table_info =
2087 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2088 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2089 table_info->vdd_dep_on_sclk;
2090 uint32_t i;
2091
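	/* cks_voffset is rescaled by VOLTAGE_VID_OFFSET_SCALE2/SCALE1 before
	 * it is written into the SMC table.
	 */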
afc0255c 2092 for (i = 0; i < dep_table->count; i++) {
f83a9991 2093 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2094 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2095 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2096 }
2097
2098 return 0;
2099}
2100
2101static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2102{
690dc626 2103 struct vega10_hwmgr *data = hwmgr->backend;
2104 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2105 struct phm_ppt_v2_information *table_info =
2106 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2107 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2108 table_info->vdd_dep_on_sclk;
2109 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2110 int result = 0;
2111 uint32_t i;
2112
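	/* Start from the widest VID window (convert_to_vid() maps higher
	 * voltages to lower codes); it is narrowed below once fused AVFS
	 * limits are read from the VBIOS.
	 */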
2113 pp_table->MinVoltageVid = (uint8_t)0xff;
2114 pp_table->MaxVoltageVid = (uint8_t)0;
2115
2116 if (data->smu_features[GNLD_AVFS].supported) {
2117 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2118 if (!result) {
2119 pp_table->MinVoltageVid = (uint8_t)
f83a9991 2120 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
2121 pp_table->MaxVoltageVid = (uint8_t)
2122 convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2123
2124 pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2125 pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2126 pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2127 pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2128 pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2129 pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2130 pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
2131
2132 pp_table->BtcGbVdroopTableCksOff.a0 =
2133 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
6524e494 2134 pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
2135 pp_table->BtcGbVdroopTableCksOff.a1 =
2136 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
6524e494 2137 pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
2138 pp_table->BtcGbVdroopTableCksOff.a2 =
2139 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
2140 pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2141
2142 pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2143 pp_table->BtcGbVdroopTableCksOn.a0 =
2144 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2145 pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2146 pp_table->BtcGbVdroopTableCksOn.a1 =
2147 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2148 pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2149 pp_table->BtcGbVdroopTableCksOn.a2 =
2150 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2151 pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
2152
2153 pp_table->AvfsGbCksOn.m1 =
2154 cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2155 pp_table->AvfsGbCksOn.m2 =
040cd2d1 2156 cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
2157 pp_table->AvfsGbCksOn.b =
2158 cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2159 pp_table->AvfsGbCksOn.m1_shift = 24;
2160 pp_table->AvfsGbCksOn.m2_shift = 12;
6524e494 2161 pp_table->AvfsGbCksOn.b_shift = 0;
f83a9991 2162
2163 pp_table->OverrideAvfsGbCksOn =
2164 avfs_params.ucEnableGbFuseTableCkson;
2165 pp_table->AvfsGbCksOff.m1 =
2166 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2167 pp_table->AvfsGbCksOff.m2 =
040cd2d1 2168 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
2169 pp_table->AvfsGbCksOff.b =
2170 cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2171 pp_table->AvfsGbCksOff.m1_shift = 24;
2172 pp_table->AvfsGbCksOff.m2_shift = 12;
2173 pp_table->AvfsGbCksOff.b_shift = 0;
2174
2175 for (i = 0; i < dep_table->count; i++)
2176 pp_table->StaticVoltageOffsetVid[i] =
2177 convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
2178
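			/* Each DisplayClock2Gfxclk entry is a fixed-point quadratic
			 * fit (see the *_shift assignments below); registry-supplied
			 * coefficients take precedence over the fused defaults when
			 * both the a and b keys are set.
			 */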
2179 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2180 data->disp_clk_quad_eqn_a) &&
2181 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2182 data->disp_clk_quad_eqn_b)) {
2183 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2184 (int32_t)data->disp_clk_quad_eqn_a;
2185 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
6524e494 2186 (int32_t)data->disp_clk_quad_eqn_b;
2187 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2188 (int32_t)data->disp_clk_quad_eqn_c;
2189 } else {
2190 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2191 (int32_t)avfs_params.ulDispclk2GfxclkM1;
2192 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
6524e494 2193 (int32_t)avfs_params.ulDispclk2GfxclkM2;
2194 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2195 (int32_t)avfs_params.ulDispclk2GfxclkB;
2196 }
2197
2198 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2199 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
4bae05e1 2200 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
2201
2202 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2203 data->dcef_clk_quad_eqn_a) &&
2204 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2205 data->dcef_clk_quad_eqn_b)) {
2206 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2207 (int32_t)data->dcef_clk_quad_eqn_a;
2208 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
6524e494 2209 (int32_t)data->dcef_clk_quad_eqn_b;
2210 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2211 (int32_t)data->dcef_clk_quad_eqn_c;
2212 } else {
2213 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2214 (int32_t)avfs_params.ulDcefclk2GfxclkM1;
2215 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
6524e494 2216 (int32_t)avfs_params.ulDcefclk2GfxclkM2;
2217 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2218 (int32_t)avfs_params.ulDcefclk2GfxclkB;
2219 }
2220
2221 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2222 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
4bae05e1 2223 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
2224
2225 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2226 data->pixel_clk_quad_eqn_a) &&
2227 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2228 data->pixel_clk_quad_eqn_b)) {
2229 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2230 (int32_t)data->pixel_clk_quad_eqn_a;
2231 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
6524e494 2232 (int32_t)data->pixel_clk_quad_eqn_b;
2233 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2234 (int32_t)data->pixel_clk_quad_eqn_c;
2235 } else {
2236 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2237 (int32_t)avfs_params.ulPixelclk2GfxclkM1;
2238 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
6524e494 2239 (int32_t)avfs_params.ulPixelclk2GfxclkM2;
2240 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2241 (int32_t)avfs_params.ulPixelclk2GfxclkB;
2242 }
2243
2244 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2245 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
4bae05e1 2246 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
2247 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2248 data->phy_clk_quad_eqn_a) &&
2249 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2250 data->phy_clk_quad_eqn_b)) {
2251 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2252 (int32_t)data->phy_clk_quad_eqn_a;
2253 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
6524e494 2254 (int32_t)data->phy_clk_quad_eqn_b;
2255 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2256 (int32_t)data->phy_clk_quad_eqn_c;
2257 } else {
2258 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2259 (int32_t)avfs_params.ulPhyclk2GfxclkM1;
2260 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
6524e494 2261 (int32_t)avfs_params.ulPhyclk2GfxclkM2;
2262 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2263 (int32_t)avfs_params.ulPhyclk2GfxclkB;
2264 }
2265
2266 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2267 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
4bae05e1 2268 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
2269
2270 pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0;
2271 pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
2272 pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1;
2273 pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
2274 pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2;
2275 pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
2276
2277 pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
2278 pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
2279 pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
2280 pp_table->AcgAvfsGb.m1_shift = 24;
2281 pp_table->AcgAvfsGb.m2_shift = 12;
2282 pp_table->AcgAvfsGb.b_shift = 0;
2283
2284 } else {
2285 data->smu_features[GNLD_AVFS].supported = false;
2286 }
2287 }
2288
2289 return 0;
2290}
2291
2292static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
2293{
690dc626 2294 struct vega10_hwmgr *data = hwmgr->backend;
2295 uint32_t agc_btc_response;
2296
2297 if (data->smu_features[GNLD_ACG].supported) {
d3f8c0ab 2298 if (0 == vega10_enable_smc_features(hwmgr, true,
2299 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
2300 data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
2301
d3f8c0ab 2302 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
bdb8cd10 2303
d3f8c0ab 2304 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
3f9ca14a 2305 agc_btc_response = smum_get_argument(hwmgr);
2306
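		/* A BTC result of 1 means calibration passed; only then is ACG
		 * enabled, in closed or open loop depending on acg_loop_state.
		 */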
2307 if (1 == agc_btc_response) {
2308 if (1 == data->acg_loop_state)
d3f8c0ab 2309 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
bdb8cd10 2310 else if (2 == data->acg_loop_state)
2311 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
2312 if (0 == vega10_enable_smc_features(hwmgr, true,
2313 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2314 data->smu_features[GNLD_ACG].enabled = true;
2315 } else {
2316 pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
2317 data->smu_features[GNLD_ACG].enabled = false;
2318 }
2319 }
2320
2321 return 0;
2322}
2323
2324static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
2325{
690dc626 2326 struct vega10_hwmgr *data = hwmgr->backend;
bdb8cd10 2327
2328 if (data->smu_features[GNLD_ACG].supported &&
2329 data->smu_features[GNLD_ACG].enabled)
d3f8c0ab 2330 if (!vega10_enable_smc_features(hwmgr, false,
06474d56 2331 data->smu_features[GNLD_ACG].smu_feature_bitmap))
bdb8cd10 2332 data->smu_features[GNLD_ACG].enabled = false;
2333
2334 return 0;
2335}
2336
2337static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2338{
690dc626 2339 struct vega10_hwmgr *data = hwmgr->backend;
2340 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2341 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2342 int result;
2343
2344 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2345 if (!result) {
2346 if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
2347 data->registry_data.regulator_hot_gpio_support) {
2348 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2349 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2350 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2351 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2352 } else {
2353 pp_table->VR0HotGpio = 0;
2354 pp_table->VR0HotPolarity = 0;
2355 pp_table->VR1HotGpio = 0;
2356 pp_table->VR1HotPolarity = 0;
2357 }
2358
2359 if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
2360 data->registry_data.ac_dc_switch_gpio_support) {
2361 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2362 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2363 } else {
2364 pp_table->AcDcGpio = 0;
2365 pp_table->AcDcPolarity = 0;
2366 }
2367 }
2368
2369 return result;
2370}
2371
2372static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2373{
690dc626 2374 struct vega10_hwmgr *data = hwmgr->backend;
2375
2376 if (data->smu_features[GNLD_AVFS].supported) {
2377 /* Already enabled or disabled */
2378 if (!(enable ^ data->smu_features[GNLD_AVFS].enabled))
2379 return 0;
2380
f83a9991 2381 if (enable) {
d3f8c0ab 2382 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2383 true,
2384 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2385 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2386 return -1);
2387 data->smu_features[GNLD_AVFS].enabled = true;
2388 } else {
d3f8c0ab 2389 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991 2390 false,
de196036 2391 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2392 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2393 return -1);
2394 data->smu_features[GNLD_AVFS].enabled = false;
2395 }
2396 }
2397
2398 return 0;
2399}
2400
2401static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
2402{
2403 struct vega10_hwmgr *data = hwmgr->backend;
2404
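	/* A user VDDC override presumably conflicts with AVFS voltage control,
	 * so AVFS is left disabled; any other DPM table change re-latches AVFS
	 * by turning it off and back on.
	 */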
2405 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
2406 vega10_avfs_enable(hwmgr, false);
2407 } else if (data->need_update_dpm_table) {
2408 vega10_avfs_enable(hwmgr, false);
2409 vega10_avfs_enable(hwmgr, true);
2410 } else {
2411 vega10_avfs_enable(hwmgr, true);
2412 }
2413
2414 return 0;
2415}
2416
2417static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2418{
2419 int result = 0;
2420
2421 uint64_t serial_number = 0;
2422 uint32_t top32, bottom32;
2423 struct phm_fuses_default fuse;
2424
690dc626 2425 struct vega10_hwmgr *data = hwmgr->backend;
2426 AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
2427
d3f8c0ab 2428 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
3f9ca14a 2429 top32 = smum_get_argument(hwmgr);
ab5cf3a5 2430
d3f8c0ab 2431 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
3f9ca14a 2432 bottom32 = smum_get_argument(hwmgr);
2433
2434 serial_number = ((uint64_t)bottom32 << 32) | top32;
2435
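	/* The chip serial number keys a table of per-part AVFS fuse overrides;
	 * on a match the override coefficients are uploaded to the SMC.
	 */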
819c4b94 2436 if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
2437 avfs_fuse_table->VFT0_b = fuse.VFT0_b;
2438 avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
2439 avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
2440 avfs_fuse_table->VFT1_b = fuse.VFT1_b;
2441 avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
2442 avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
2443 avfs_fuse_table->VFT2_b = fuse.VFT2_b;
2444 avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
2445 avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
2446 result = smum_smc_table_manager(hwmgr, (uint8_t *)avfs_fuse_table,
2447 AVFSFUSETABLE, false);
2448 PP_ASSERT_WITH_CODE(!result,
2449			"Failed to upload FuseOverride!",
2450 );
2451 }
2452
2453 return result;
2454}
2455
2456static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
2457{
2458 struct vega10_hwmgr *data = hwmgr->backend;
2459 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
2460 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
2461 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
2462 struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
2463 uint32_t i;
2464
2465 dep_table = table_info->vdd_dep_on_mclk;
2466 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
2467
2468 for (i = 0; i < dep_table->count; i++) {
2469 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2470 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
2471 return;
2472 }
2473 }
2474
2475 dep_table = table_info->vdd_dep_on_sclk;
2476 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
2477 for (i = 0; i < dep_table->count; i++) {
2478 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2479 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
2480 return;
2481 }
2482 }
2483}
2484
2485/**
2486* Initializes the SMC table and uploads it
2487*
2488* @param hwmgr the address of the powerplay hardware manager.
2489* @return 0 on success; a negative error code otherwise.
2491*/
2492static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2493{
2494 int result;
690dc626 2495 struct vega10_hwmgr *data = hwmgr->backend;
2496 struct phm_ppt_v2_information *table_info =
2497 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2498 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2499 struct pp_atomfwctrl_voltage_table voltage_table;
05ee3215 2500 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
ecfee95a 2501 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
2502
2503 result = vega10_setup_default_dpm_tables(hwmgr);
2504 PP_ASSERT_WITH_CODE(!result,
2505 "Failed to setup default DPM tables!",
2506 return result);
2507
c5a44849 2508 /* initialize ODN table */
2509 if (hwmgr->od_enabled) {
2510 if (odn_table->max_vddc) {
2511 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
2512 vega10_check_dpm_table_updated(hwmgr);
2513 } else {
2514 vega10_odn_initial_default_setting(hwmgr);
2515 }
2516 }
c5a44849 2517
2518 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2519 VOLTAGE_OBJ_SVID2, &voltage_table);
2520 pp_table->MaxVidStep = voltage_table.max_vid_step;
2521
2522 pp_table->GfxDpmVoltageMode =
2523 (uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2524 pp_table->SocDpmVoltageMode =
2525 (uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2526 pp_table->UclkDpmVoltageMode =
2527 (uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2528 pp_table->UvdDpmVoltageMode =
2529 (uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2530 pp_table->VceDpmVoltageMode =
2531 (uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2532 pp_table->Mp0DpmVoltageMode =
2533 (uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
effa290c 2534
2535 pp_table->DisplayDpmVoltageMode =
2536 (uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2537
2538 data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
2539 data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
2540
2541 if (data->registry_data.ulv_support &&
2542 table_info->us_ulv_voltage_offset) {
2543 result = vega10_populate_ulv_state(hwmgr);
2544 PP_ASSERT_WITH_CODE(!result,
2545 "Failed to initialize ULV state!",
2546 return result);
2547 }
2548
2549 result = vega10_populate_smc_link_levels(hwmgr);
2550 PP_ASSERT_WITH_CODE(!result,
2551 "Failed to initialize Link Level!",
2552 return result);
2553
2554 result = vega10_populate_all_graphic_levels(hwmgr);
2555 PP_ASSERT_WITH_CODE(!result,
2556 "Failed to initialize Graphics Level!",
2557 return result);
2558
2559 result = vega10_populate_all_memory_levels(hwmgr);
2560 PP_ASSERT_WITH_CODE(!result,
2561 "Failed to initialize Memory Level!",
2562 return result);
2563
2564 vega10_populate_vddc_soc_levels(hwmgr);
2565
2566 result = vega10_populate_all_display_clock_levels(hwmgr);
2567 PP_ASSERT_WITH_CODE(!result,
2568 "Failed to initialize Display Level!",
2569 return result);
2570
2571 result = vega10_populate_smc_vce_levels(hwmgr);
2572 PP_ASSERT_WITH_CODE(!result,
2573 "Failed to initialize VCE Level!",
2574 return result);
2575
2576 result = vega10_populate_smc_uvd_levels(hwmgr);
2577 PP_ASSERT_WITH_CODE(!result,
2578 "Failed to initialize UVD Level!",
2579 return result);
2580
afc0255c 2581 if (data->registry_data.clock_stretcher_support) {
2582 result = vega10_populate_clock_stretcher_table(hwmgr);
2583 PP_ASSERT_WITH_CODE(!result,
2584 "Failed to populate Clock Stretcher Table!",
2585 return result);
2586 }
2587
2588 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2589 if (!result) {
2590 data->vbios_boot_state.vddc = boot_up_values.usVddc;
2591 data->vbios_boot_state.vddci = boot_up_values.usVddci;
2592 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2593 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2594 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
f73f9e35 2595 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2e41a874 2596 SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk);
2597
2598 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2e41a874 2599 SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk);
f73f9e35 2600
05ee3215 2601 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
c5b053d2 2602 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
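		/* Lock the SoC voltage floor to the VBIOS boot VDDC until DPM takes
		 * over; the message parameter appears to be in 0.25 mV units, hence
		 * the multiply by 4.
		 */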
05ee3215 2603 if (0 != boot_up_values.usVddc) {
d3f8c0ab 2604 smum_send_msg_to_smc_with_parameter(hwmgr,
2605 PPSMC_MSG_SetFloorSocVoltage,
2606 (boot_up_values.usVddc * 4));
2607 data->vbios_boot_state.bsoc_vddc_lock = true;
2608 } else {
2609 data->vbios_boot_state.bsoc_vddc_lock = false;
2610 }
d3f8c0ab 2611 smum_send_msg_to_smc_with_parameter(hwmgr,
2612 PPSMC_MSG_SetMinDeepSleepDcefclk,
2613 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
2614 }
2615
2616 result = vega10_populate_avfs_parameters(hwmgr);
2617 PP_ASSERT_WITH_CODE(!result,
2618 "Failed to initialize AVFS Parameters!",
2619 return result);
2620
2621 result = vega10_populate_gpio_parameters(hwmgr);
2622 PP_ASSERT_WITH_CODE(!result,
2623 "Failed to initialize GPIO Parameters!",
2624 return result);
2625
2626 pp_table->GfxclkAverageAlpha = (uint8_t)
2627 (data->gfxclk_average_alpha);
2628 pp_table->SocclkAverageAlpha = (uint8_t)
2629 (data->socclk_average_alpha);
2630 pp_table->UclkAverageAlpha = (uint8_t)
2631 (data->uclk_average_alpha);
2632 pp_table->GfxActivityAverageAlpha = (uint8_t)
2633 (data->gfx_activity_average_alpha);
2634
2635 vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2636
2637 result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
2638
2639 PP_ASSERT_WITH_CODE(!result,
2640 "Failed to upload PPtable!", return result);
2641
2642 result = vega10_avfs_enable(hwmgr, true);
2643 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
f83a9991 2644 return result);
bdb8cd10 2645 vega10_acg_enable(hwmgr);
d6c025d2 2646
2647 return 0;
2648}
2649
2650static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2651{
690dc626 2652 struct vega10_hwmgr *data = hwmgr->backend;
2653
2654 if (data->smu_features[GNLD_THERMAL].supported) {
2655 if (data->smu_features[GNLD_THERMAL].enabled)
2656 pr_info("THERMAL Feature Already enabled!");
2657
2658 PP_ASSERT_WITH_CODE(
d3f8c0ab 2659 !vega10_enable_smc_features(hwmgr,
2660 true,
2661 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2662 "Enable THERMAL Feature Failed!",
2663 return -1);
2664 data->smu_features[GNLD_THERMAL].enabled = true;
2665 }
2666
2667 return 0;
2668}
2669
2670static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2671{
690dc626 2672 struct vega10_hwmgr *data = hwmgr->backend;
2673
2674 if (data->smu_features[GNLD_THERMAL].supported) {
2675 if (!data->smu_features[GNLD_THERMAL].enabled)
2676 pr_info("THERMAL Feature Already disabled!");
2677
2678 PP_ASSERT_WITH_CODE(
d3f8c0ab 2679 !vega10_enable_smc_features(hwmgr,
8b9242ed
RZ
2680 false,
2681 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2682 "disable THERMAL Feature Failed!",
2683 return -1);
2684 data->smu_features[GNLD_THERMAL].enabled = false;
2685 }
2686
2687 return 0;
2688}
2689
2690static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2691{
690dc626 2692 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 2693
dd5a6fe2 2694 if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
2695 if (data->smu_features[GNLD_VR0HOT].supported) {
2696 PP_ASSERT_WITH_CODE(
d3f8c0ab 2697 !vega10_enable_smc_features(hwmgr,
2698 true,
2699 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2700 "Attempt to Enable VR0 Hot feature Failed!",
2701 return -1);
2702 data->smu_features[GNLD_VR0HOT].enabled = true;
2703 } else {
2704 if (data->smu_features[GNLD_VR1HOT].supported) {
2705 PP_ASSERT_WITH_CODE(
d3f8c0ab 2706 !vega10_enable_smc_features(hwmgr,
2707 true,
2708 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2709				"Attempt to Enable VR1 Hot feature Failed!",
2710 return -1);
2711 data->smu_features[GNLD_VR1HOT].enabled = true;
2712 }
2713 }
2714 }
2715 return 0;
2716}
2717
2718static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2719{
690dc626 2720 struct vega10_hwmgr *data = hwmgr->backend;
2721
2722 if (data->registry_data.ulv_support) {
d3f8c0ab 2723 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2724 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2725 "Enable ULV Feature Failed!",
2726 return -1);
2727 data->smu_features[GNLD_ULV].enabled = true;
2728 }
2729
2730 return 0;
2731}
2732
2733static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2734{
690dc626 2735 struct vega10_hwmgr *data = hwmgr->backend;
2736
2737 if (data->registry_data.ulv_support) {
d3f8c0ab 2738 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2739 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2740 "disable ULV Feature Failed!",
2741 return -EINVAL);
2742 data->smu_features[GNLD_ULV].enabled = false;
2743 }
2744
2745 return 0;
2746}
2747
2748static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2749{
690dc626 2750 struct vega10_hwmgr *data = hwmgr->backend;
2751
2752 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
d3f8c0ab 2753 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2754 true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2755 "Attempt to Enable DS_GFXCLK Feature Failed!",
df057e02 2756 return -EINVAL);
2757 data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2758 }
2759
2760 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
d3f8c0ab 2761 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991 2762 true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2763 "Attempt to Enable DS_SOCCLK Feature Failed!",
2764 return -EINVAL);
2765 data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2766 }
2767
2768 if (data->smu_features[GNLD_DS_LCLK].supported) {
d3f8c0ab 2769 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991 2770 true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2771 "Attempt to Enable DS_LCLK Feature Failed!",
2772 return -EINVAL);
2773 data->smu_features[GNLD_DS_LCLK].enabled = true;
2774 }
2775
df057e02 2776 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
d3f8c0ab 2777 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2778 true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2779 "Attempt to Enable DS_DCEFCLK Feature Failed!",
2780 return -EINVAL);
2781 data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
2782 }
2783
2784 return 0;
2785}
2786
2787static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2788{
690dc626 2789 struct vega10_hwmgr *data = hwmgr->backend;
2790
2791 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
d3f8c0ab 2792 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2793 false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2794 "Attempt to disable DS_GFXCLK Feature Failed!",
2795 return -EINVAL);
2796 data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2797 }
2798
2799 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
d3f8c0ab 2800 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2801 false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2802				"Attempt to disable DS_SOCCLK Feature Failed!",
2803 return -EINVAL);
2804 data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2805 }
2806
2807 if (data->smu_features[GNLD_DS_LCLK].supported) {
d3f8c0ab 2808 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
df057e02
RZ
2809 false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2810 "Attempt to disable DS_LCLK Feature Failed!",
2811 return -EINVAL);
2812 data->smu_features[GNLD_DS_LCLK].enabled = false;
2813 }
2814
2815 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
d3f8c0ab 2816 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2817 false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2818 "Attempt to disable DS_DCEFCLK Feature Failed!",
2819 return -EINVAL);
2820 data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
2821 }
2822
2823 return 0;
2824}
2825
2826static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2827{
690dc626 2828 struct vega10_hwmgr *data = hwmgr->backend;
2829 uint32_t i, feature_mask = 0;
2830
2831
2832	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
d3f8c0ab 2833 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2834 false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2835 "Attempt to disable LED DPM feature failed!", return -EINVAL);
2836 data->smu_features[GNLD_LED_DISPLAY].enabled = false;
2837 }
2838
2839 for (i = 0; i < GNLD_DPM_MAX; i++) {
2840 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2841 if (data->smu_features[i].supported) {
2842 if (data->smu_features[i].enabled) {
2843 feature_mask |= data->smu_features[i].
2844 smu_feature_bitmap;
2845 data->smu_features[i].enabled = false;
2846 }
2847 }
2848 }
2849 }
2850
d3f8c0ab 2851 vega10_enable_smc_features(hwmgr, false, feature_mask);
2852
2853 return 0;
2854}
2855
2856/**
2857 * @brief Tell the SMC to enable the supported DPMs.
2858 *
2859 * @param hwmgr - the address of the powerplay hardware manager.
2860 * @param bitmap - bitmap of the features to enable.
2861 * @return 0 if at least one DPM was successfully enabled.
2862 */
2863static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2864{
690dc626 2865 struct vega10_hwmgr *data = hwmgr->backend;
2866 uint32_t i, feature_mask = 0;
2867
2868 for (i = 0; i < GNLD_DPM_MAX; i++) {
2869 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2870 if (data->smu_features[i].supported) {
2871 if (!data->smu_features[i].enabled) {
2872 feature_mask |= data->smu_features[i].
2873 smu_feature_bitmap;
2874 data->smu_features[i].enabled = true;
2875 }
2876 }
2877 }
2878 }
2879
d3f8c0ab 2880 if (vega10_enable_smc_features(hwmgr,
2881 true, feature_mask)) {
2882 for (i = 0; i < GNLD_DPM_MAX; i++) {
2883 if (data->smu_features[i].smu_feature_bitmap &
2884 feature_mask)
2885 data->smu_features[i].enabled = false;
2886 }
2887 }
2888
2889	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
d3f8c0ab 2890 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2891 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2892 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2893 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2894 }
2895
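	/* DPM is running now, so the boot-time SoC voltage floor can be released. */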
05ee3215 2896 if (data->vbios_boot_state.bsoc_vddc_lock) {
d3f8c0ab 2897 smum_send_msg_to_smc_with_parameter(hwmgr,
2898 PPSMC_MSG_SetFloorSocVoltage, 0);
2899 data->vbios_boot_state.bsoc_vddc_lock = false;
2900 }
2901
dd5a6fe2 2902 if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
f83a9991 2903 if (data->smu_features[GNLD_ACDC].supported) {
d3f8c0ab 2904 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2905 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2906					"Attempt to Enable ACDC Feature Failed!",
2907 return -1);
2908 data->smu_features[GNLD_ACDC].enabled = true;
2909 }
2910 }
2911
2912 return 0;
2913}
2914
2915static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
2916{
690dc626 2917 struct vega10_hwmgr *data = hwmgr->backend;
2918
2919 if (data->smu_features[GNLD_PCC_LIMIT].supported) {
2920 if (enable == data->smu_features[GNLD_PCC_LIMIT].enabled)
2921			pr_info("GNLD_PCC_LIMIT has been %s\n", enable ? "enabled" : "disabled");
2922 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2923 enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap),
2924 "Attempt to Enable PCC Limit feature Failed!",
2925 return -EINVAL);
2926 data->smu_features[GNLD_PCC_LIMIT].enabled = enable;
2927 }
2928
2929 return 0;
2930}
2931
2932static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2933{
690dc626 2934 struct vega10_hwmgr *data = hwmgr->backend;
2935 int tmp_result, result = 0;
2936
2937 vega10_enable_disable_PCC_limit_feature(hwmgr, true);
2938
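	/* Pass the precomputed telemetry configuration to the SMC up front,
	 * before the SMC tables are built and uploaded.
	 */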
2939 smum_send_msg_to_smc_with_parameter(hwmgr,
2940 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2941
2942 tmp_result = vega10_construct_voltage_tables(hwmgr);
2943 PP_ASSERT_WITH_CODE(!tmp_result,
3d3c4f1b 2944 "Failed to construct voltage tables!",
2945 result = tmp_result);
2946
2947 tmp_result = vega10_init_smc_table(hwmgr);
2948 PP_ASSERT_WITH_CODE(!tmp_result,
2949 "Failed to initialize SMC table!",
2950 result = tmp_result);
2951
dd5a6fe2 2952 if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
2953 tmp_result = vega10_enable_thermal_protection(hwmgr);
2954 PP_ASSERT_WITH_CODE(!tmp_result,
2955 "Failed to enable thermal protection!",
2956 result = tmp_result);
2957 }
2958
2959 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2960 PP_ASSERT_WITH_CODE(!tmp_result,
2961 "Failed to enable VR hot feature!",
2962 result = tmp_result);
2963
2964 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2965 PP_ASSERT_WITH_CODE(!tmp_result,
2966 "Failed to enable deep sleep master switch!",
2967 result = tmp_result);
2968
2969 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2970 PP_ASSERT_WITH_CODE(!tmp_result,
2971 "Failed to start DPM!", result = tmp_result);
2972
2973	/* enable didt; do not abort the sequence if didt fails */
2974 tmp_result = vega10_enable_didt_config(hwmgr);
2975 PP_ASSERT(!tmp_result,
2976 "Failed to enable didt config!");
2977
2978 tmp_result = vega10_enable_power_containment(hwmgr);
2979 PP_ASSERT_WITH_CODE(!tmp_result,
2980 "Failed to enable power containment!",
2981 result = tmp_result);
2982
2983 tmp_result = vega10_power_control_set_level(hwmgr);
2984 PP_ASSERT_WITH_CODE(!tmp_result,
2985 "Failed to power control set level!",
2986 result = tmp_result);
2987
2988 tmp_result = vega10_enable_ulv(hwmgr);
2989 PP_ASSERT_WITH_CODE(!tmp_result,
2990 "Failed to enable ULV!",
2991 result = tmp_result);
2992
2993 return result;
2994}
2995
2996static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2997{
2998 return sizeof(struct vega10_power_state);
2999}
3000
3001static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
3002 void *state, struct pp_power_state *power_state,
3003 void *pp_table, uint32_t classification_flag)
3004{
ebc1c9c1 3005 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
3006 struct vega10_power_state *vega10_power_state =
3007 cast_phw_vega10_power_state(&(power_state->hardware));
3008 struct vega10_performance_level *performance_level;
3009 ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
3010 ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
3011 (ATOM_Vega10_POWERPLAYTABLE *)pp_table;
3012 ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
3013 (ATOM_Vega10_SOCCLK_Dependency_Table *)
3014 (((unsigned long)powerplay_table) +
3015 le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
3016 ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
3017 (ATOM_Vega10_GFXCLK_Dependency_Table *)
3018 (((unsigned long)powerplay_table) +
3019 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
3020 ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
3021 (ATOM_Vega10_MCLK_Dependency_Table *)
3022 (((unsigned long)powerplay_table) +
3023 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3024
3025
3026 /* The following fields are not initialized here:
3027 * id orderedList allStatesList
3028 */
3029 power_state->classification.ui_label =
3030 (le16_to_cpu(state_entry->usClassification) &
3031 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3032 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3033 power_state->classification.flags = classification_flag;
3034 /* NOTE: There is a classification2 flag in BIOS
3035 * that is not being used right now
3036 */
3037 power_state->classification.temporary_state = false;
3038 power_state->classification.to_be_deleted = false;
3039
3040 power_state->validation.disallowOnDC =
3041 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
3042 ATOM_Vega10_DISALLOW_ON_DC) != 0);
3043
3044 power_state->display.disableFrameModulation = false;
3045 power_state->display.limitRefreshrate = false;
3046 power_state->display.enableVariBright =
3047 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
3048 ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
3049
3050 power_state->validation.supportedPowerLevels = 0;
3051 power_state->uvd_clocks.VCLK = 0;
3052 power_state->uvd_clocks.DCLK = 0;
3053 power_state->temperatures.min = 0;
3054 power_state->temperatures.max = 0;
3055
3056 performance_level = &(vega10_power_state->performance_levels
3057 [vega10_power_state->performance_level_count++]);
3058
3059 PP_ASSERT_WITH_CODE(
3060 (vega10_power_state->performance_level_count <
3061 NUM_GFXCLK_DPM_LEVELS),
3062 "Performance levels exceeds SMC limit!",
3063 return -1);
3064
3065 PP_ASSERT_WITH_CODE(
3066 (vega10_power_state->performance_level_count <=
3067 hwmgr->platform_descriptor.
3068 hardwareActivityPerformanceLevels),
3069 "Performance levels exceeds Driver limit!",
3070 return -1);
3071
3072 /* Performance levels are arranged from low to high. */
3073 performance_level->soc_clock = socclk_dep_table->entries
3074 [state_entry->ucSocClockIndexLow].ulClk;
3075 performance_level->gfx_clock = gfxclk_dep_table->entries
3076 [state_entry->ucGfxClockIndexLow].ulClk;
3077 performance_level->mem_clock = mclk_dep_table->entries
3078 [state_entry->ucMemClockIndexLow].ulMemClk;
3079
3080 performance_level = &(vega10_power_state->performance_levels
3081 [vega10_power_state->performance_level_count++]);
f83a9991 3082 performance_level->soc_clock = socclk_dep_table->entries
ebc1c9c1
RZ
3083 [state_entry->ucSocClockIndexHigh].ulClk;
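	/* Revision 1 of the GFXCLK dependency table stores the larger V2
	 * record layout, so the entries array must be re-cast before the
	 * high GFX clock index can be dereferenced.
	 */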
3084 if (gfxclk_dep_table->ucRevId == 0) {
3085 performance_level->gfx_clock = gfxclk_dep_table->entries
f83a9991 3086 [state_entry->ucGfxClockIndexHigh].ulClk;
ebc1c9c1
RZ
3087 } else if (gfxclk_dep_table->ucRevId == 1) {
3088 patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
3089 performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
3090 }
3091
f83a9991
EH
3092 performance_level->mem_clock = mclk_dep_table->entries
3093 [state_entry->ucMemClockIndexHigh].ulMemClk;
3094 return 0;
3095}
3096
3097static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3098 unsigned long entry_index, struct pp_power_state *state)
3099{
3100 int result;
3101 struct vega10_power_state *ps;
3102
3103 state->hardware.magic = PhwVega10_Magic;
3104
3105 ps = cast_phw_vega10_power_state(&state->hardware);
3106
3107 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
3108 vega10_get_pp_table_entry_callback_func);
3109
3110 /*
3111 * This is the earliest time we have all the dependency tables
3112 * and the VBIOS boot state
3113 */
3114 /* set DC compatible flag if this state supports DC */
3115 if (!state->validation.disallowOnDC)
3116 ps->dc_compatible = true;
3117
3118 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3119 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3120
3121 return 0;
3122}
3123
3124static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
3125 struct pp_hw_power_state *hw_ps)
3126{
3127 return 0;
3128}
3129
3130static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3131 struct pp_power_state *request_ps,
3132 const struct pp_power_state *current_ps)
3133{
600ae890 3134 struct amdgpu_device *adev = hwmgr->adev;
f83a9991
EH
3135 struct vega10_power_state *vega10_ps =
3136 cast_phw_vega10_power_state(&request_ps->hardware);
3137 uint32_t sclk;
3138 uint32_t mclk;
3139 struct PP_Clocks minimum_clocks = {0};
3140 bool disable_mclk_switching;
3141 bool disable_mclk_switching_for_frame_lock;
3142 bool disable_mclk_switching_for_vr;
3143 bool force_mclk_high;
f83a9991
EH
3144 const struct phm_clock_and_voltage_limits *max_limits;
3145 uint32_t i;
690dc626 3146 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3147 struct phm_ppt_v2_information *table_info =
3148 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3149 int32_t count;
3150 uint32_t stable_pstate_sclk_dpm_percentage;
3151 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3152 uint32_t latency;
3153
3154 data->battery_state = (PP_StateUILabel_Battery ==
3155 request_ps->classification.ui_label);
3156
3157 if (vega10_ps->performance_level_count != 2)
3158 pr_info("Vega10 should always have 2 performance levels");
3159
600ae890 3160 max_limits = adev->pm.ac_power ?
f83a9991
EH
3161 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3162 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3163
3164 /* Cap clock DPM tables at DC MAX if it is in DC. */
600ae890 3165 if (!adev->pm.ac_power) {
f83a9991
EH
3166 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3167 if (vega10_ps->performance_levels[i].mem_clock >
3168 max_limits->mclk)
3169 vega10_ps->performance_levels[i].mem_clock =
3170 max_limits->mclk;
3171 if (vega10_ps->performance_levels[i].gfx_clock >
3172 max_limits->sclk)
3173 vega10_ps->performance_levels[i].gfx_clock =
3174 max_limits->sclk;
3175 }
3176 }
3177
f83a9991 3178 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
555fd70c
RZ
3179 minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3180 minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
f83a9991 3181
dd5a6fe2 3182 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
0d1da3c1
NI
3183 stable_pstate_sclk_dpm_percentage =
3184 data->registry_data.stable_pstate_sclk_dpm_percentage;
f83a9991
EH
3185 PP_ASSERT_WITH_CODE(
3186 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
3187 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
3188 "percent sclk value must range from 1% to 100%, setting default value",
3189 stable_pstate_sclk_dpm_percentage = 75);
3190
3191 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3192 stable_pstate_sclk = (max_limits->sclk *
3193 stable_pstate_sclk_dpm_percentage) / 100;
3194
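		/* Snap the stable-pstate SCLK down to the closest SCLK dependency
		 * entry at or below it; if every entry is above it, fall back to
		 * the lowest entry.
		 */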
3195 for (count = table_info->vdd_dep_on_sclk->count - 1;
3196 count >= 0; count--) {
3197 if (stable_pstate_sclk >=
3198 table_info->vdd_dep_on_sclk->entries[count].clk) {
3199 stable_pstate_sclk =
3200 table_info->vdd_dep_on_sclk->entries[count].clk;
3201 break;
3202 }
3203 }
3204
3205 if (count < 0)
3206 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3207
3208 stable_pstate_mclk = max_limits->mclk;
3209
3210 minimum_clocks.engineClock = stable_pstate_sclk;
3211 minimum_clocks.memoryClock = stable_pstate_mclk;
3212 }
3213
6ce2d46c
AD
3214 disable_mclk_switching_for_frame_lock =
3215 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3216 disable_mclk_switching_for_vr =
3217 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
dd5a6fe2 3218 force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
f83a9991 3219
555fd70c 3220 if (hwmgr->display_config->num_display == 0)
d6bca7e7
AD
3221 disable_mclk_switching = false;
3222 else
55b85206
AD
3223 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
3224 !hwmgr->display_config->multi_monitor_in_sync) ||
d6bca7e7
AD
3225 disable_mclk_switching_for_frame_lock ||
3226 disable_mclk_switching_for_vr ||
3227 force_mclk_high;
f83a9991
EH
3228
3229 sclk = vega10_ps->performance_levels[0].gfx_clock;
3230 mclk = vega10_ps->performance_levels[0].mem_clock;
3231
3232 if (sclk < minimum_clocks.engineClock)
3233 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3234 max_limits->sclk : minimum_clocks.engineClock;
3235
3236 if (mclk < minimum_clocks.memoryClock)
3237 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3238 max_limits->mclk : minimum_clocks.memoryClock;
3239
3240 vega10_ps->performance_levels[0].gfx_clock = sclk;
3241 vega10_ps->performance_levels[0].mem_clock = mclk;
3242
d0856f3a
RZ
3243 if (vega10_ps->performance_levels[1].gfx_clock <
3244 vega10_ps->performance_levels[0].gfx_clock)
3245 vega10_ps->performance_levels[0].gfx_clock =
3246 vega10_ps->performance_levels[1].gfx_clock;
f83a9991
EH
3247
3248 if (disable_mclk_switching) {
3249 /* Set Mclk to the max of level 0 and level 1 */
3250 if (mclk < vega10_ps->performance_levels[1].mem_clock)
3251 mclk = vega10_ps->performance_levels[1].mem_clock;
3252
3253 /* Find the lowest MCLK frequency that is within
3254 * the tolerable latency defined in DAL
3255 */
7d8d968d 3256 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
f83a9991
EH
3257 for (i = 0; i < data->mclk_latency_table.count; i++) {
3258 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3259 (data->mclk_latency_table.entries[i].frequency >=
3260 vega10_ps->performance_levels[0].mem_clock) &&
3261 (data->mclk_latency_table.entries[i].frequency <=
3262 vega10_ps->performance_levels[1].mem_clock))
3263 mclk = data->mclk_latency_table.entries[i].frequency;
3264 }
3265 vega10_ps->performance_levels[0].mem_clock = mclk;
3266 } else {
3267 if (vega10_ps->performance_levels[1].mem_clock <
3268 vega10_ps->performance_levels[0].mem_clock)
d0856f3a
RZ
3269 vega10_ps->performance_levels[0].mem_clock =
3270 vega10_ps->performance_levels[1].mem_clock;
f83a9991
EH
3271 }
3272
dd5a6fe2 3273 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
f83a9991
EH
3274 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3275 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3276 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
3277 }
3278 }
3279
3280 return 0;
3281}
3282
3283static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3284{
690dc626 3285 struct vega10_hwmgr *data = hwmgr->backend;
47fdd897
RZ
3286 const struct phm_set_power_state_input *states =
3287 (const struct phm_set_power_state_input *)input;
3288 const struct vega10_power_state *vega10_ps =
3289 cast_const_phw_vega10_power_state(states->pnew_state);
3290 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
3291 uint32_t sclk = vega10_ps->performance_levels
3292 [vega10_ps->performance_level_count - 1].gfx_clock;
3293 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
3294 uint32_t mclk = vega10_ps->performance_levels
3295 [vega10_ps->performance_level_count - 1].mem_clock;
3296 uint32_t i;
3297
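	/* Look up the requested SCLK among the DPM levels; if no level matches
	 * and the clock exceeds the current top level, flag an overdrive SCLK
	 * update and stretch the top level to the new value.
	 */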
3298 for (i = 0; i < sclk_table->count; i++) {
3299 if (sclk == sclk_table->dpm_levels[i].value)
3300 break;
3301 }
3302
3303 if (i >= sclk_table->count) {
1b3b27b2 3304 if (sclk > sclk_table->dpm_levels[i-1].value) {
3305 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3306 sclk_table->dpm_levels[i-1].value = sclk;
3307 }
47fdd897
RZ
3308 }
3309
3310 for (i = 0; i < mclk_table->count; i++) {
3311 if (mclk == mclk_table->dpm_levels[i].value)
3312 break;
3313 }
3314
3315 if (i >= mclk_table->count) {
1b3b27b2 3316 if (mclk > mclk_table->dpm_levels[i-1].value) {
3317 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3318 mclk_table->dpm_levels[i-1].value = mclk;
3319 }
47fdd897 3320 }
f83a9991 3321
c5a44849
RZ
3322 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
3323 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
f83a9991 3324
f83a9991
EH
3325 return 0;
3326}
3327
3328static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3329 struct pp_hwmgr *hwmgr, const void *input)
3330{
3331 int result = 0;
690dc626 3332 struct vega10_hwmgr *data = hwmgr->backend;
a0c3bf0f
RZ
3333 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3334 struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
3335 struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
3336 int count;
f83a9991 3337
c5a44849
RZ
3338 if (!data->need_update_dpm_table)
3339 return 0;
f83a9991 3340
a0c3bf0f
RZ
3341 if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3342 for (count = 0; count < dpm_table->gfx_table.count; count++)
3343 dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3344 }
3345
3346 odn_clk_table = &odn_table->vdd_dep_on_mclk;
3347 if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3348 for (count = 0; count < dpm_table->mem_table.count; count++)
3349 dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3350 }
3351
c5a44849
RZ
3352 if (data->need_update_dpm_table &
3353 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
3354 result = vega10_populate_all_graphic_levels(hwmgr);
3355 PP_ASSERT_WITH_CODE((0 == result),
3356 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3357 return result);
3358 }
f83a9991 3359
c5a44849
RZ
3360 if (data->need_update_dpm_table &
3361 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3362 result = vega10_populate_all_memory_levels(hwmgr);
3363 PP_ASSERT_WITH_CODE((0 == result),
3364 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3365 return result);
3366 }
f83a9991 3367
c5a44849 3368 vega10_populate_vddc_soc_levels(hwmgr);
f83a9991 3369
f83a9991
EH
3370 return result;
3371}
3372
3373static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3374 struct vega10_single_dpm_table *dpm_table,
3375 uint32_t low_limit, uint32_t high_limit)
3376{
3377 uint32_t i;
3378
3379 for (i = 0; i < dpm_table->count; i++) {
3380 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3381 (dpm_table->dpm_levels[i].value > high_limit))
3382 dpm_table->dpm_levels[i].enabled = false;
3383 else
3384 dpm_table->dpm_levels[i].enabled = true;
3385 }
3386 return 0;
3387}
3388
3389static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3390 struct vega10_single_dpm_table *dpm_table,
3391 uint32_t low_limit, uint32_t high_limit,
3392 uint32_t disable_dpm_mask)
3393{
3394 uint32_t i;
3395
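	/* A cleared bit in disable_dpm_mask disables the corresponding level;
	 * only levels whose bit is set and whose clock lies inside the
	 * [low_limit, high_limit] window stay enabled.
	 */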
3396 for (i = 0; i < dpm_table->count; i++) {
3397 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3398 (dpm_table->dpm_levels[i].value > high_limit))
3399 dpm_table->dpm_levels[i].enabled = false;
3400 else if (!((1 << i) & disable_dpm_mask))
3401 dpm_table->dpm_levels[i].enabled = false;
3402 else
3403 dpm_table->dpm_levels[i].enabled = true;
3404 }
3405 return 0;
3406}
3407
3408static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3409 const struct vega10_power_state *vega10_ps)
3410{
690dc626 3411 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3412 uint32_t high_limit_count;
3413
3414 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3415 "power state did not have any performance level",
3416 return -1);
3417
3418 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3419
3420 vega10_trim_single_dpm_states(hwmgr,
3421 &(data->dpm_table.soc_table),
3422 vega10_ps->performance_levels[0].soc_clock,
3423 vega10_ps->performance_levels[high_limit_count].soc_clock);
3424
3425 vega10_trim_single_dpm_states_with_mask(hwmgr,
3426 &(data->dpm_table.gfx_table),
3427 vega10_ps->performance_levels[0].gfx_clock,
3428 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3429 data->disable_dpm_mask);
3430
3431 vega10_trim_single_dpm_states(hwmgr,
3432 &(data->dpm_table.mem_table),
3433 vega10_ps->performance_levels[0].mem_clock,
3434 vega10_ps->performance_levels[high_limit_count].mem_clock);
3435
3436 return 0;
3437}
3438
3439static uint32_t vega10_find_lowest_dpm_level(
3440 struct vega10_single_dpm_table *table)
3441{
3442 uint32_t i;
3443
3444 for (i = 0; i < table->count; i++) {
3445 if (table->dpm_levels[i].enabled)
3446 break;
3447 }
3448
3449 return i;
3450}
3451
3452static uint32_t vega10_find_highest_dpm_level(
3453 struct vega10_single_dpm_table *table)
3454{
3455 uint32_t i = 0;
3456
3457 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3458 for (i = table->count; i > 0; i--) {
3459 if (table->dpm_levels[i - 1].enabled)
3460 return i - 1;
3461 }
3462 } else {
3463 pr_info("DPM Table Has Too Many Entries!");
3464 return MAX_REGULAR_DPM_NUMBER - 1;
3465 }
3466
3467 return i;
3468}
3469
3470static void vega10_apply_dal_minimum_voltage_request(
3471 struct pp_hwmgr *hwmgr)
3472{
3473 return;
3474}
3475
3d4d4fd0
RZ
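/* Return the SOCCLK soft-min index implied by the top UCLK level: the
 * voltage index of the highest UCLK dependency entry plus one.
 */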
3476static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
3477{
3478 struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
3479 struct phm_ppt_v2_information *table_info =
3480 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3481
3482 vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;
3483
3484 return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
3485}
3486
f83a9991
EH
3487static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3488{
690dc626 3489 struct vega10_hwmgr *data = hwmgr->backend;
3d4d4fd0 3490 uint32_t socclk_idx;
f83a9991
EH
3491
3492 vega10_apply_dal_minimum_voltage_request(hwmgr);
3493
3494 if (!data->registry_data.sclk_dpm_key_disabled) {
3495 if (data->smc_state_table.gfx_boot_level !=
3496 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
d246cd53 3497 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991 3498 PPSMC_MSG_SetSoftMinGfxclkByIndex,
d246cd53 3499 data->smc_state_table.gfx_boot_level);
f83a9991
EH
3500 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3501 data->smc_state_table.gfx_boot_level;
3502 }
3503 }
3504
3505 if (!data->registry_data.mclk_dpm_key_disabled) {
3506 if (data->smc_state_table.mem_boot_level !=
3507 data->dpm_table.mem_table.dpm_state.soft_min_level) {
3d4d4fd0
RZ
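			/* When the UCLK floor is forced to the top level, raise the
			 * SOCCLK floor instead, using the SOC index derived from the
			 * highest UCLK dependency entry.
			 */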
3508 if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
3509 socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
d246cd53 3510 smum_send_msg_to_smc_with_parameter(hwmgr,
3d4d4fd0 3511 PPSMC_MSG_SetSoftMinSocclkByIndex,
d246cd53 3512 socclk_idx);
3d4d4fd0 3513 } else {
d246cd53 3514 smum_send_msg_to_smc_with_parameter(hwmgr,
3d4d4fd0 3515 PPSMC_MSG_SetSoftMinUclkByIndex,
d246cd53 3516 data->smc_state_table.mem_boot_level);
3d4d4fd0 3517 }
f83a9991
EH
3518 data->dpm_table.mem_table.dpm_state.soft_min_level =
3519 data->smc_state_table.mem_boot_level;
3520 }
3521 }
3522
bb05821b
EQ
3523 if (!data->registry_data.socclk_dpm_key_disabled) {
3524 if (data->smc_state_table.soc_boot_level !=
3525 data->dpm_table.soc_table.dpm_state.soft_min_level) {
3526 smum_send_msg_to_smc_with_parameter(hwmgr,
3527 PPSMC_MSG_SetSoftMinSocclkByIndex,
3528 data->smc_state_table.soc_boot_level);
3529 data->dpm_table.soc_table.dpm_state.soft_min_level =
3530 data->smc_state_table.soc_boot_level;
3531 }
3532 }
3533
f83a9991
EH
3534 return 0;
3535}
3536
3537static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3538{
690dc626 3539 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3540
3541 vega10_apply_dal_minimum_voltage_request(hwmgr);
3542
3543 if (!data->registry_data.sclk_dpm_key_disabled) {
3544 if (data->smc_state_table.gfx_max_level !=
d246cd53
RZ
3545 data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3546 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991 3547 PPSMC_MSG_SetSoftMaxGfxclkByIndex,
d246cd53 3548 data->smc_state_table.gfx_max_level);
f83a9991
EH
3549 data->dpm_table.gfx_table.dpm_state.soft_max_level =
3550 data->smc_state_table.gfx_max_level;
3551 }
3552 }
3553
3554 if (!data->registry_data.mclk_dpm_key_disabled) {
3555 if (data->smc_state_table.mem_max_level !=
d246cd53
RZ
3556 data->dpm_table.mem_table.dpm_state.soft_max_level) {
3557 smum_send_msg_to_smc_with_parameter(hwmgr,
3558 PPSMC_MSG_SetSoftMaxUclkByIndex,
3559 data->smc_state_table.mem_max_level);
f83a9991
EH
3560 data->dpm_table.mem_table.dpm_state.soft_max_level =
3561 data->smc_state_table.mem_max_level;
3562 }
3563 }
3564
bb05821b
EQ
3565 if (!data->registry_data.socclk_dpm_key_disabled) {
3566 if (data->smc_state_table.soc_max_level !=
3567 data->dpm_table.soc_table.dpm_state.soft_max_level) {
3568 smum_send_msg_to_smc_with_parameter(hwmgr,
3569 PPSMC_MSG_SetSoftMaxSocclkByIndex,
3570 data->smc_state_table.soc_max_level);
3571 data->dpm_table.soc_table.dpm_state.soft_max_level =
3572 data->smc_state_table.soc_max_level;
3573 }
3574 }
3575
f83a9991
EH
3576 return 0;
3577}
3578
3579static int vega10_generate_dpm_level_enable_mask(
3580 struct pp_hwmgr *hwmgr, const void *input)
3581{
690dc626 3582 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3583 const struct phm_set_power_state_input *states =
3584 (const struct phm_set_power_state_input *)input;
3585 const struct vega10_power_state *vega10_ps =
3586 cast_const_phw_vega10_power_state(states->pnew_state);
3587 int i;
3588
3589 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3590 "Attempt to Trim DPM States Failed!",
3591 return -1);
3592
3593 data->smc_state_table.gfx_boot_level =
3594 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3595 data->smc_state_table.gfx_max_level =
3596 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3597 data->smc_state_table.mem_boot_level =
3598 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3599 data->smc_state_table.mem_max_level =
3600 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
373e87fc
KF
3601 data->smc_state_table.soc_boot_level =
3602 vega10_find_lowest_dpm_level(&(data->dpm_table.soc_table));
3603 data->smc_state_table.soc_max_level =
3604 vega10_find_highest_dpm_level(&(data->dpm_table.soc_table));
f83a9991
EH
3605
3606 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3607 "Attempt to upload DPM Bootup Levels Failed!",
3608 return -1);
3609 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3610 "Attempt to upload DPM Max Levels Failed!",
3611 return -1);
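	/* With the soft limits uploaded, re-enable the DPM levels between the
	 * boot and max indices.
	 */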
3612 for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3613 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3614
3615
3616 for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3617 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3618
373e87fc
KF
3619 for (i = data->smc_state_table.soc_boot_level; i < data->smc_state_table.soc_max_level; i++)
3620 data->dpm_table.soc_table.dpm_levels[i].enabled = true;
3621
f83a9991
EH
3622 return 0;
3623}
3624
3625int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3626{
690dc626 3627 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3628
3629 if (data->smu_features[GNLD_DPM_VCE].supported) {
d3f8c0ab 3630 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991
EH
3631 enable,
3632 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3633 "Attempt to Enable/Disable DPM VCE Failed!",
3634 return -1);
3635 data->smu_features[GNLD_DPM_VCE].enabled = enable;
3636 }
3637
3638 return 0;
3639}
3640
3641static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3642{
690dc626 3643 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3644 uint32_t low_sclk_interrupt_threshold = 0;
3645
dd5a6fe2 3646 if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
29411f05 3647 (data->low_sclk_interrupt_threshold != 0)) {
f83a9991
EH
3648 low_sclk_interrupt_threshold =
3649 data->low_sclk_interrupt_threshold;
3650
3651 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3652 cpu_to_le32(low_sclk_interrupt_threshold);
3653
3654 /* This message will also enable SmcToHost Interrupt */
d246cd53 3655 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991
EH
3656 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3657 (uint32_t)low_sclk_interrupt_threshold);
3658 }
3659
d246cd53 3660 return 0;
f83a9991
EH
3661}
3662
3663static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3664 const void *input)
3665{
3666 int tmp_result, result = 0;
690dc626 3667 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3668 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3669
3670 tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3671 PP_ASSERT_WITH_CODE(!tmp_result,
3672 "Failed to find DPM states clocks in DPM table!",
3673 result = tmp_result);
3674
3675 tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3676 PP_ASSERT_WITH_CODE(!tmp_result,
3677 "Failed to populate and upload SCLK MCLK DPM levels!",
3678 result = tmp_result);
3679
3680 tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3681 PP_ASSERT_WITH_CODE(!tmp_result,
3682 "Failed to generate DPM level enabled mask!",
3683 result = tmp_result);
3684
3685 tmp_result = vega10_update_sclk_threshold(hwmgr);
3686 PP_ASSERT_WITH_CODE(!tmp_result,
3687 "Failed to update SCLK threshold!",
3688 result = tmp_result);
3689
3f9ca14a 3690 result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
f83a9991
EH
3691 PP_ASSERT_WITH_CODE(!result,
3692 "Failed to upload PPtable!", return result);
3693
c5a44849
RZ
3694 vega10_update_avfs(hwmgr);
3695
36f5f8a7
EQ
3696 /*
3697 * Clear all OD flags except DPMTABLE_OD_UPDATE_VDDC.
3698 * That will help to keep AVFS disabled.
3699 */
c5a44849 3700 data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
f83a9991
EH
3701
3702 return 0;
3703}
3704
f93f0c3a 3705static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
f83a9991
EH
3706{
3707 struct pp_power_state *ps;
3708 struct vega10_power_state *vega10_ps;
3709
3710 if (hwmgr == NULL)
3711 return -EINVAL;
3712
3713 ps = hwmgr->request_ps;
3714
3715 if (ps == NULL)
3716 return -EINVAL;
3717
3718 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3719
3720 if (low)
3721 return vega10_ps->performance_levels[0].gfx_clock;
3722 else
3723 return vega10_ps->performance_levels
3724 [vega10_ps->performance_level_count - 1].gfx_clock;
3725}
3726
f93f0c3a 3727static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
f83a9991
EH
3728{
3729 struct pp_power_state *ps;
3730 struct vega10_power_state *vega10_ps;
3731
3732 if (hwmgr == NULL)
3733 return -EINVAL;
3734
3735 ps = hwmgr->request_ps;
3736
3737 if (ps == NULL)
3738 return -EINVAL;
3739
3740 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3741
3742 if (low)
3743 return vega10_ps->performance_levels[0].mem_clock;
3744 else
3745 return vega10_ps->performance_levels
3746 [vega10_ps->performance_level_count-1].mem_clock;
3747}
3748
17d176a5 3749static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
5b79d048 3750 uint32_t *query)
17d176a5 3751{
6b5defd6
EH
3752 uint32_t value;
3753
5b79d048
RZ
3754 if (!query)
3755 return -EINVAL;
3756
d246cd53 3757 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
3f9ca14a 3758 value = smum_get_argument(hwmgr);
fda519fb 3759
5b79d048
RZ
3760 /* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
3761 *query = value << 8;
6b5defd6
EH
3762
3763 return 0;
17d176a5
EH
3764}
3765
f83a9991
EH
3766static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3767 void *value, int *size)
3768{
b8a55591 3769 struct amdgpu_device *adev = hwmgr->adev;
c11d8afe 3770 uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
690dc626 3771 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3772 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3773 int ret = 0;
b8a55591 3774 uint32_t val_vid;
f83a9991
EH
3775
3776 switch (idx) {
3777 case AMDGPU_PP_SENSOR_GFX_SCLK:
c11d8afe
EQ
3778 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
3779 sclk_mhz = smum_get_argument(hwmgr);
3780 *((uint32_t *)value) = sclk_mhz * 100;
f83a9991
EH
3781 break;
3782 case AMDGPU_PP_SENSOR_GFX_MCLK:
952e5daa 3783 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
3f9ca14a 3784 mclk_idx = smum_get_argument(hwmgr);
952e5daa 3785 if (mclk_idx < dpm_table->mem_table.count) {
f83a9991
EH
3786 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3787 *size = 4;
952e5daa
RZ
3788 } else {
3789 ret = -EINVAL;
f83a9991
EH
3790 }
3791 break;
3792 case AMDGPU_PP_SENSOR_GPU_LOAD:
952e5daa 3793 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3f9ca14a 3794 activity_percent = smum_get_argument(hwmgr);
952e5daa
RZ
3795 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3796 *size = 4;
f83a9991
EH
3797 break;
3798 case AMDGPU_PP_SENSOR_GPU_TEMP:
3799 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
3800 *size = 4;
3801 break;
a34d1166
EQ
3802 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
3803 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot);
3804 *((uint32_t *)value) = smum_get_argument(hwmgr) *
3805 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
3806 *size = 4;
3807 break;
3808 case AMDGPU_PP_SENSOR_MEM_TEMP:
3809 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM);
3810 *((uint32_t *)value) = smum_get_argument(hwmgr) *
3811 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
3812 *size = 4;
3813 break;
f83a9991
EH
3814 case AMDGPU_PP_SENSOR_UVD_POWER:
3815 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3816 *size = 4;
3817 break;
3818 case AMDGPU_PP_SENSOR_VCE_POWER:
3819 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3820 *size = 4;
3821 break;
17d176a5 3822 case AMDGPU_PP_SENSOR_GPU_POWER:
5b79d048 3823 ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
17d176a5 3824 break;
59655cb6 3825 case AMDGPU_PP_SENSOR_VDDGFX:
b8a55591 3826 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
59655cb6
RZ
3827 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
3828 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
3829 *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
3830 return 0;
1f6c52ed
AD
3831 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
3832 ret = vega10_get_enabled_smc_features(hwmgr, (uint64_t *)value);
3833 if (!ret)
3834 *size = 8;
3835 break;
f83a9991
EH
3836 default:
3837 ret = -EINVAL;
3838 break;
3839 }
6390258a 3840
f83a9991
EH
3841 return ret;
3842}
3843
d246cd53 3844static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
f83a9991
EH
3845 bool has_disp)
3846{
d246cd53 3847 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991 3848 PPSMC_MSG_SetUclkFastSwitch,
f132d561 3849 has_disp ? 1 : 0);
f83a9991
EH
3850}
3851
3852int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3853 struct pp_display_clock_request *clock_req)
3854{
3855 int result = 0;
3856 enum amd_pp_clock_type clk_type = clock_req->clock_type;
75f0e32b 3857 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
f83a9991
EH
3858 DSPCLK_e clk_select = 0;
3859 uint32_t clk_request = 0;
3860
3861 switch (clk_type) {
3862 case amd_pp_dcef_clock:
3863 clk_select = DSPCLK_DCEFCLK;
3864 break;
3865 case amd_pp_disp_clock:
3866 clk_select = DSPCLK_DISPCLK;
3867 break;
3868 case amd_pp_pixel_clock:
3869 clk_select = DSPCLK_PIXCLK;
3870 break;
3871 case amd_pp_phy_clock:
3872 clk_select = DSPCLK_PHYCLK;
3873 break;
3874 default:
3875 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
3876 result = -1;
3877 break;
3878 }
3879
3880 if (!result) {
3881 clk_request = (clk_freq << 16) | clk_select;
d246cd53 3882 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991
EH
3883 PPSMC_MSG_RequestDisplayClockByFreq,
3884 clk_request);
3885 }
3886
3887 return result;
3888}
3889
75f0e32b
RZ
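/* Return the index of the first UCLK dependency entry at or above the
 * requested frequency, or the last index when every entry is below it.
 */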
3890static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
3891 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
3892 uint32_t frequency)
3893{
3894 uint8_t count;
3895 uint8_t i;
3896
3897 if (mclk_table == NULL || mclk_table->count == 0)
3898 return 0;
3899
3900 count = (uint8_t)(mclk_table->count);
3901
3902 for (i = 0; i < count; i++) {
3903 if (mclk_table->entries[i].clk >= frequency)
3904 return i;
3905 }
3906
3907 return i-1;
3908}
3909
f83a9991
EH
3910static int vega10_notify_smc_display_config_after_ps_adjustment(
3911 struct pp_hwmgr *hwmgr)
3912{
690dc626 3913 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3914 struct vega10_single_dpm_table *dpm_table =
3915 &data->dpm_table.dcef_table;
75f0e32b
RZ
3916 struct phm_ppt_v2_information *table_info =
3917 (struct phm_ppt_v2_information *)hwmgr->pptable;
3918 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
3919 uint32_t idx;
f83a9991
EH
3920 struct PP_Clocks min_clocks = {0};
3921 uint32_t i;
3922 struct pp_display_clock_request clock_req;
3923
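	/* Request UCLK fast switching only when it is safe: at most one active
	 * display, synchronized multi-monitor, or NB p-state switching
	 * disabled.
	 */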
59a8348f 3924 if ((hwmgr->display_config->num_display > 1) &&
92859e0d
EQ
3925 !hwmgr->display_config->multi_monitor_in_sync &&
3926 !hwmgr->display_config->nb_pstate_switch_disable)
f83a9991
EH
3927 vega10_notify_smc_display_change(hwmgr, false);
3928 else
3929 vega10_notify_smc_display_change(hwmgr, true);
3930
555fd70c
RZ
3931 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
3932 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
3933 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
f83a9991
EH
3934
3935 for (i = 0; i < dpm_table->count; i++) {
3936 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
3937 break;
3938 }
3939
3940 if (i < dpm_table->count) {
3941 clock_req.clock_type = amd_pp_dcef_clock;
ed092664 3942 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
f83a9991 3943 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
d246cd53 3944 smum_send_msg_to_smc_with_parameter(
d3f8c0ab 3945 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
d246cd53 3946 min_clocks.dcefClockInSR / 100);
75f0e32b 3947 } else {
f83a9991 3948 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
75f0e32b
RZ
3949 }
3950 } else {
5bbc5c64 3951 pr_debug("Cannot find requested DCEFCLK!");
75f0e32b
RZ
3952 }
3953
3954 if (min_clocks.memoryClock != 0) {
3955 idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
d3f8c0ab 3956 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
75f0e32b
RZ
3957 data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
3958 }
f83a9991
EH
3959
3960 return 0;
3961}
3962
3963static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
3964{
690dc626 3965 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3966
3967 data->smc_state_table.gfx_boot_level =
3968 data->smc_state_table.gfx_max_level =
3969 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3970 data->smc_state_table.mem_boot_level =
3971 data->smc_state_table.mem_max_level =
3972 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3973
3974 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3975 "Failed to upload boot level to highest!",
3976 return -1);
3977
3978 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3979 "Failed to upload dpm max level to highest!",
3980 return -1);
3981
3982 return 0;
3983}
3984
3985static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3986{
690dc626 3987 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3988
3989 data->smc_state_table.gfx_boot_level =
3990 data->smc_state_table.gfx_max_level =
3991 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3992 data->smc_state_table.mem_boot_level =
3993 data->smc_state_table.mem_max_level =
3994 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3995
3996 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3997 "Failed to upload boot level to highest!",
3998 return -1);
3999
4000 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4001 "Failed to upload dpm max level to highest!",
4002 return -1);
4003
4004 return 0;
4005
4006}
4007
4008static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
4009{
690dc626 4010 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4011
4012 data->smc_state_table.gfx_boot_level =
4013 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4014 data->smc_state_table.gfx_max_level =
4015 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4016 data->smc_state_table.mem_boot_level =
4017 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4018 data->smc_state_table.mem_max_level =
4019 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4020
4021 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4022 "Failed to upload DPM Bootup Levels!",
4023 return -1);
4024
4025 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4026 "Failed to upload DPM Max Levels!",
4027 return -1);
4028 return 0;
4029}
4030
53a4b90d
RZ
4031static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
4032 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
f83a9991 4033{
53a4b90d
RZ
4034 struct phm_ppt_v2_information *table_info =
4035 (struct phm_ppt_v2_information *)(hwmgr->pptable);
f83a9991 4036
53a4b90d
RZ
4037 if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
4038 table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
4039 table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
4040 *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
4041 *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
4042 *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
dd70949d
RZ
4043 hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
4044 hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
f83a9991
EH
4045 }
4046
53a4b90d
RZ
4047 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
4048 *sclk_mask = 0;
4049 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
4050 *mclk_mask = 0;
4051 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
4052 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
4053 *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
4054 *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
4055 }
4056 return 0;
f83a9991
EH
4057}
4058
f93f0c3a 4059static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
f83a9991 4060{
7522ffc4
RZ
4061 switch (mode) {
4062 case AMD_FAN_CTRL_NONE:
f93f0c3a 4063 vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
7522ffc4
RZ
4064 break;
4065 case AMD_FAN_CTRL_MANUAL:
dd5a6fe2 4066 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
f93f0c3a 4067 vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
7522ffc4
RZ
4068 break;
4069 case AMD_FAN_CTRL_AUTO:
710931c2 4070 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
f93f0c3a 4071 vega10_fan_ctrl_start_smc_fan_control(hwmgr);
7522ffc4
RZ
4072 break;
4073 default:
4074 break;
4075 }
f83a9991
EH
4076}
4077
29ae1118
RZ
4078static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4079 enum pp_clock_type type, uint32_t mask)
4080{
4081 struct vega10_hwmgr *data = hwmgr->backend;
4082
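	/* The lowest and highest set bits of the mask pick the new soft-min
	 * and soft-max DPM levels for the selected clock domain.
	 */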
4083 switch (type) {
4084 case PP_SCLK:
4085 data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
4086 data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
4087
4088 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4089 "Failed to upload boot level to lowest!",
4090 return -EINVAL);
4091
4092 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4093 "Failed to upload dpm max level to highest!",
4094 return -EINVAL);
4095 break;
4096
4097 case PP_MCLK:
4098 data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
4099 data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
4100
4101 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4102 "Failed to upload boot level to lowest!",
4103 return -EINVAL);
4104
4105 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4106 "Failed to upload dpm max level to highest!",
4107 return -EINVAL);
4108
4109 break;
4110
bb05821b
EQ
4111 case PP_SOCCLK:
4112 data->smc_state_table.soc_boot_level = mask ? (ffs(mask) - 1) : 0;
4113 data->smc_state_table.soc_max_level = mask ? (fls(mask) - 1) : 0;
4114
4115 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4116 "Failed to upload boot level to lowest!",
4117 return -EINVAL);
4118
4119 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4120 "Failed to upload dpm max level to highest!",
4121 return -EINVAL);
4122
4123 break;
4124
4125 case PP_DCEFCLK:
4126 pr_info("Setting DCEFCLK min/max dpm level is not supported!\n");
4127 break;
4128
29ae1118
RZ
4129 case PP_PCIE:
4130 default:
4131 break;
4132 }
4133
4134 return 0;
4135}
4136
53a4b90d
RZ
4137static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4138 enum amd_dpm_forced_level level)
4139{
4140 int ret = 0;
4141 uint32_t sclk_mask = 0;
4142 uint32_t mclk_mask = 0;
4143 uint32_t soc_mask = 0;
53a4b90d 4144
dd70949d
RZ
4145 if (hwmgr->pstate_sclk == 0)
4146 vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4147
53a4b90d
RZ
4148 switch (level) {
4149 case AMD_DPM_FORCED_LEVEL_HIGH:
4150 ret = vega10_force_dpm_highest(hwmgr);
53a4b90d
RZ
4151 break;
4152 case AMD_DPM_FORCED_LEVEL_LOW:
4153 ret = vega10_force_dpm_lowest(hwmgr);
53a4b90d
RZ
4154 break;
4155 case AMD_DPM_FORCED_LEVEL_AUTO:
4156 ret = vega10_unforce_dpm_levels(hwmgr);
53a4b90d
RZ
4157 break;
4158 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
4159 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
4160 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
4161 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
4162 ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4163 if (ret)
4164 return ret;
53a4b90d
RZ
4165 vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
4166 vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
4167 break;
4168 case AMD_DPM_FORCED_LEVEL_MANUAL:
53a4b90d
RZ
4169 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
4170 default:
4171 break;
4172 }
4173
9947f704
RZ
4174 if (!ret) {
4175 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4176 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
4177 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4178 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
4179 }
9ac870c7 4180
9947f704 4181 return ret;
53a4b90d
RZ
4182}
4183
f93f0c3a 4184static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
f83a9991 4185{
690dc626 4186 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 4187
7522ffc4
RZ
4188 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
4189 return AMD_FAN_CTRL_MANUAL;
4190 else
4191 return AMD_FAN_CTRL_AUTO;
f83a9991
EH
4192}
4193
4194static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4195 struct amd_pp_simple_clock_info *info)
4196{
4197 struct phm_ppt_v2_information *table_info =
4198 (struct phm_ppt_v2_information *)hwmgr->pptable;
4199 struct phm_clock_and_voltage_limits *max_limits =
4200 &table_info->max_clock_voltage_on_ac;
4201
4202 info->engine_max_clock = max_limits->sclk;
4203 info->memory_max_clock = max_limits->mclk;
4204
4205 return 0;
4206}
4207
4208static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4209 struct pp_clock_levels_with_latency *clocks)
4210{
4211 struct phm_ppt_v2_information *table_info =
4212 (struct phm_ppt_v2_information *)hwmgr->pptable;
4213 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4214 table_info->vdd_dep_on_sclk;
4215 uint32_t i;
4216
23ec3d14 4217 clocks->num_levels = 0;
f83a9991
EH
4218 for (i = 0; i < dep_table->count; i++) {
4219 if (dep_table->entries[i].clk) {
4220 clocks->data[clocks->num_levels].clocks_in_khz =
23ec3d14 4221 dep_table->entries[i].clk * 10;
f83a9991
EH
4222 clocks->num_levels++;
4223 }
4224 }
4225
4226}
4227
f83a9991
EH
4228static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4229 struct pp_clock_levels_with_latency *clocks)
4230{
4231 struct phm_ppt_v2_information *table_info =
4232 (struct phm_ppt_v2_information *)hwmgr->pptable;
4233 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4234 table_info->vdd_dep_on_mclk;
690dc626 4235 struct vega10_hwmgr *data = hwmgr->backend;
23ec3d14 4236 uint32_t j = 0;
f83a9991
EH
4237 uint32_t i;
4238
f83a9991
EH
4239 for (i = 0; i < dep_table->count; i++) {
4240 if (dep_table->entries[i].clk) {
6eb9d603 4241
23ec3d14
RZ
4242 clocks->data[j].clocks_in_khz =
4243 dep_table->entries[i].clk * 10;
4244 data->mclk_latency_table.entries[j].frequency =
4245 dep_table->entries[i].clk;
4246 clocks->data[j].latency_in_us =
6eb9d603 4247 data->mclk_latency_table.entries[j].latency = 25;
23ec3d14 4248 j++;
f83a9991
EH
4249 }
4250 }
23ec3d14 4251 clocks->num_levels = data->mclk_latency_table.count = j;
f83a9991
EH
4252}
4253
4254static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4255 struct pp_clock_levels_with_latency *clocks)
4256{
4257 struct phm_ppt_v2_information *table_info =
4258 (struct phm_ppt_v2_information *)hwmgr->pptable;
4259 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4260 table_info->vdd_dep_on_dcefclk;
4261 uint32_t i;
4262
4263 for (i = 0; i < dep_table->count; i++) {
23ec3d14 4264 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
f83a9991
EH
4265 clocks->data[i].latency_in_us = 0;
4266 clocks->num_levels++;
4267 }
4268}
4269
4270static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4271 struct pp_clock_levels_with_latency *clocks)
4272{
4273 struct phm_ppt_v2_information *table_info =
4274 (struct phm_ppt_v2_information *)hwmgr->pptable;
4275 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4276 table_info->vdd_dep_on_socclk;
4277 uint32_t i;
4278
4279 for (i = 0; i < dep_table->count; i++) {
23ec3d14 4280 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
f83a9991
EH
4281 clocks->data[i].latency_in_us = 0;
4282 clocks->num_levels++;
4283 }
4284}
4285
4286static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4287 enum amd_pp_clock_type type,
4288 struct pp_clock_levels_with_latency *clocks)
4289{
4290 switch (type) {
4291 case amd_pp_sys_clock:
4292 vega10_get_sclks(hwmgr, clocks);
4293 break;
4294 case amd_pp_mem_clock:
4295 vega10_get_memclocks(hwmgr, clocks);
4296 break;
4297 case amd_pp_dcef_clock:
4298 vega10_get_dcefclocks(hwmgr, clocks);
4299 break;
4300 case amd_pp_soc_clock:
4301 vega10_get_socclocks(hwmgr, clocks);
4302 break;
4303 default:
4304 return -1;
4305 }
4306
4307 return 0;
4308}
4309
4310static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4311 enum amd_pp_clock_type type,
4312 struct pp_clock_levels_with_voltage *clocks)
4313{
4314 struct phm_ppt_v2_information *table_info =
4315 (struct phm_ppt_v2_information *)hwmgr->pptable;
4316 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4317 uint32_t i;
4318
4319 switch (type) {
4320 case amd_pp_mem_clock:
4321 dep_table = table_info->vdd_dep_on_mclk;
4322 break;
4323 case amd_pp_dcef_clock:
4324 dep_table = table_info->vdd_dep_on_dcefclk;
4325 break;
4326 case amd_pp_disp_clock:
4327 dep_table = table_info->vdd_dep_on_dispclk;
4328 break;
4329 case amd_pp_pixel_clock:
4330 dep_table = table_info->vdd_dep_on_pixclk;
4331 break;
4332 case amd_pp_phy_clock:
4333 dep_table = table_info->vdd_dep_on_phyclk;
4334 break;
4335 default:
4336 return -1;
4337 }
4338
4339 for (i = 0; i < dep_table->count; i++) {
23ec3d14 4340 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
f83a9991
EH
4341 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4342 entries[dep_table->entries[i].vddInd].us_vdd);
4343 clocks->num_levels++;
4344 }
4345
4346 if (i < dep_table->count)
4347 return -1;
4348
4349 return 0;
4350}
4351
4352static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
99c5e27d 4353 void *clock_range)
f83a9991 4354{
690dc626 4355 struct vega10_hwmgr *data = hwmgr->backend;
20582319 4356 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
f83a9991
EH
4357 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4358 int result = 0;
f83a9991
EH
4359
4360 if (!data->registry_data.disable_water_mark) {
63c2f7ed 4361 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
f83a9991
EH
4362 data->water_marks_bitmap = WaterMarksExist;
4363 }
4364
4365 return result;
4366}
4367
d6e40301
EQ
4368static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
4369{
4370 static const char *ppfeature_name[] = {
4371 "DPM_PREFETCHER",
4372 "GFXCLK_DPM",
4373 "UCLK_DPM",
4374 "SOCCLK_DPM",
4375 "UVD_DPM",
4376 "VCE_DPM",
4377 "ULV",
4378 "MP0CLK_DPM",
4379 "LINK_DPM",
4380 "DCEFCLK_DPM",
4381 "AVFS",
4382 "GFXCLK_DS",
4383 "SOCCLK_DS",
4384 "LCLK_DS",
4385 "PPT",
4386 "TDC",
4387 "THERMAL",
4388 "GFX_PER_CU_CG",
4389 "RM",
4390 "DCEFCLK_DS",
4391 "ACDC",
4392 "VR0HOT",
4393 "VR1HOT",
4394 "FW_CTF",
4395 "LED_DISPLAY",
4396 "FAN_CONTROL",
4397 "FAST_PPT",
4398 "DIDT",
4399 "ACG",
4400 "PCC_LIMIT"};
4401 static const char *output_title[] = {
4402 "FEATURES",
4403 "BITMASK",
4404 "ENABLEMENT"};
4405 uint64_t features_enabled;
4406 int i;
4407 int ret = 0;
4408 int size = 0;
4409
4410 ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
4411 PP_ASSERT_WITH_CODE(!ret,
4412 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
4413 return ret);
4414
4415 size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
4416 size += sprintf(buf + size, "%-19s %-22s %s\n",
4417 output_title[0],
4418 output_title[1],
4419 output_title[2]);
4420 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
4421 size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
4422 ppfeature_name[i],
4423 1ULL << i,
4424 (features_enabled & (1ULL << i)) ? "Y" : "N");
4425 }
4426
4427 return size;
4428}
4429
4430static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
4431{
4432 uint64_t features_enabled;
4433 uint64_t features_to_enable;
4434 uint64_t features_to_disable;
4435 int ret = 0;
4436
4437 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
4438 return -EINVAL;
4439
4440 ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
4441 if (ret)
4442 return ret;
4443
4444 features_to_disable =
b7d485df 4445 features_enabled & ~new_ppfeature_masks;
d6e40301 4446 features_to_enable =
b7d485df 4447 ~features_enabled & new_ppfeature_masks;
d6e40301
EQ
4448
4449 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
4450 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
4451
4452 if (features_to_disable) {
4453 ret = vega10_enable_smc_features(hwmgr, false, features_to_disable);
4454 if (ret)
4455 return ret;
4456 }
4457
4458 if (features_to_enable) {
4459 ret = vega10_enable_smc_features(hwmgr, true, features_to_enable);
4460 if (ret)
4461 return ret;
4462 }
4463
4464 return 0;
4465}
4466
f83a9991
EH
4467static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4468 enum pp_clock_type type, char *buf)
4469{
690dc626 4470 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4471 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4472 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
bb05821b
EQ
4473 struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
4474 struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
f83a9991 4475 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
c5a44849
RZ
4476 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
4477
f83a9991
EH
4478 int i, now, size = 0;
4479
4480 switch (type) {
4481 case PP_SCLK:
4482 if (data->registry_data.sclk_dpm_key_disabled)
4483 break;
4484
d246cd53 4485 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
3f9ca14a 4486 now = smum_get_argument(hwmgr);
f83a9991
EH
4487
4488 for (i = 0; i < sclk_table->count; i++)
4489 size += sprintf(buf + size, "%d: %uMhz %s\n",
4490 i, sclk_table->dpm_levels[i].value / 100,
4491 (i == now) ? "*" : "");
4492 break;
4493 case PP_MCLK:
4494 if (data->registry_data.mclk_dpm_key_disabled)
4495 break;
4496
d246cd53 4497 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
3f9ca14a 4498 now = smum_get_argument(hwmgr);
f83a9991
EH
4499
4500 for (i = 0; i < mclk_table->count; i++)
4501 size += sprintf(buf + size, "%d: %uMhz %s\n",
4502 i, mclk_table->dpm_levels[i].value / 100,
4503 (i == now) ? "*" : "");
4504 break;
bb05821b
EQ
4505 case PP_SOCCLK:
4506 if (data->registry_data.socclk_dpm_key_disabled)
4507 break;
4508
4509 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex);
4510 now = smum_get_argument(hwmgr);
4511
4512 for (i = 0; i < soc_table->count; i++)
4513 size += sprintf(buf + size, "%d: %uMhz %s\n",
4514 i, soc_table->dpm_levels[i].value / 100,
4515 (i == now) ? "*" : "");
4516 break;
4517 case PP_DCEFCLK:
4518 if (data->registry_data.dcefclk_dpm_key_disabled)
4519 break;
4520
4521 smum_send_msg_to_smc_with_parameter(hwmgr,
4522 PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK);
4523 now = smum_get_argument(hwmgr);
4524
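		/* Unlike the other clocks, the SMC reports the current DCEFCLK in
		 * MHz rather than as a DPM index, so match on frequency below.
		 */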
4525 for (i = 0; i < dcef_table->count; i++)
4526 size += sprintf(buf + size, "%d: %uMhz %s\n",
4527 i, dcef_table->dpm_levels[i].value / 100,
4528 (dcef_table->dpm_levels[i].value / 100 == now) ?
4529 "*" : "");
4530 break;
f83a9991 4531 case PP_PCIE:
d246cd53 4532 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
3f9ca14a 4533 now = smum_get_argument(hwmgr);
f83a9991
EH
4534
4535 for (i = 0; i < pcie_table->count; i++)
4536 size += sprintf(buf + size, "%d: %s %s\n", i,
7413d2fa
EQ
4537 (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
4538 (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
4539 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
f83a9991
EH
4540 (i == now) ? "*" : "");
4541 break;
c5a44849
RZ
4542 case OD_SCLK:
4543 if (hwmgr->od_enabled) {
4544 size = sprintf(buf, "%s:\n", "OD_SCLK");
4545 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
4546 for (i = 0; i < podn_vdd_dep->count; i++)
4547 size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
4548 i, podn_vdd_dep->entries[i].clk / 100,
4549 podn_vdd_dep->entries[i].vddc);
4550 }
4551 break;
4552 case OD_MCLK:
4553 if (hwmgr->od_enabled) {
4554 size = sprintf(buf, "%s:\n", "OD_MCLK");
4555 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
4556 for (i = 0; i < podn_vdd_dep->count; i++)
4557 size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
4558 i, podn_vdd_dep->entries[i].clk/100,
4559 podn_vdd_dep->entries[i].vddc);
4560 }
4561 break;
4562 case OD_RANGE:
4563 if (hwmgr->od_enabled) {
4564 size = sprintf(buf, "%s:\n", "OD_RANGE");
4565 size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4566 data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
4567 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4568 size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4569 data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
4570 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4571 size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4572 data->odn_dpm_table.min_vddc,
4573 data->odn_dpm_table.max_vddc);
4574 }
4575 break;
f83a9991
EH
4576 default:
4577 break;
4578 }
4579 return size;
4580}
4581
4582static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4583{
690dc626 4584 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 4585 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
555fd70c 4586 int result = 0;
f83a9991
EH
4587
4588 if ((data->water_marks_bitmap & WaterMarksExist) &&
4589 !(data->water_marks_bitmap & WaterMarksLoaded)) {
3f9ca14a 4590 result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
f83a9991
EH
4591 PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
4592 data->water_marks_bitmap |= WaterMarksLoaded;
4593 }
4594
4595 if (data->water_marks_bitmap & WaterMarksLoaded) {
d3f8c0ab 4596 smum_send_msg_to_smc_with_parameter(hwmgr,
555fd70c 4597 PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
f83a9991
EH
4598 }
4599
4600 return result;
4601}
4602
4603int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4604{
690dc626 4605 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4606
4607 if (data->smu_features[GNLD_DPM_UVD].supported) {
d3f8c0ab 4608 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991
EH
4609 enable,
4610 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4611 "Attempt to Enable/Disable DPM UVD Failed!",
4612 return -1);
4613 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4614 }
4615 return 0;
4616}
4617
f93f0c3a 4618static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
f83a9991 4619{
690dc626 4620 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4621
4622 data->vce_power_gated = bgate;
f93f0c3a 4623 vega10_enable_disable_vce_dpm(hwmgr, !bgate);
f83a9991
EH
4624}
4625
f93f0c3a 4626static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
f83a9991 4627{
690dc626 4628 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4629
4630 data->uvd_power_gated = bgate;
f93f0c3a 4631 vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
f83a9991
EH
4632}
4633
4634static inline bool vega10_are_power_levels_equal(
4635 const struct vega10_performance_level *pl1,
4636 const struct vega10_performance_level *pl2)
4637{
4638 return ((pl1->soc_clock == pl2->soc_clock) &&
4639 (pl1->gfx_clock == pl2->gfx_clock) &&
4640 (pl1->mem_clock == pl2->mem_clock));
4641}
4642
4643static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4644 const struct pp_hw_power_state *pstate1,
4645 const struct pp_hw_power_state *pstate2, bool *equal)
4646{
4647 const struct vega10_power_state *psa;
4648 const struct vega10_power_state *psb;
4649 int i;
4650
4651 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4652 return -EINVAL;
4653
4654 psa = cast_const_phw_vega10_power_state(pstate1);
4655 psb = cast_const_phw_vega10_power_state(pstate2);
4656 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4657 if (psa->performance_level_count != psb->performance_level_count) {
4658 *equal = false;
4659 return 0;
4660 }
4661
4662 for (i = 0; i < psa->performance_level_count; i++) {
4663 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4664 /* If we have found even one performance level pair that is different the states are different. */
4665 *equal = false;
4666 return 0;
4667 }
4668 }
4669
4670 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4671 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4672 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4673 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4674
4675 return 0;
4676}
4677
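/*
 * The SMC needs a display-configuration update when the number of active
 * displays has changed or, with SclkDeepSleep enabled, when the minimum
 * self-refresh clock has changed.
 */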
4678static bool
4679vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4680{
690dc626 4681 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 4682 bool is_update_required = false;
f83a9991 4683
555fd70c 4684 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4685 is_update_required = true;
4686
dd5a6fe2 4687 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
555fd70c 4688 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
4689 is_update_required = true;
4690 }
4691
4692 return is_update_required;
4693}
4694
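/*
 * Tear down the DPM features brought up at init: thermal protection,
 * power containment, DIDT, AVFS, the DPM features themselves, deep sleep,
 * ULV, ACG and the PCC limit feature. Failures are recorded in 'result'
 * but do not stop the remaining teardown steps.
 */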
4695static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4696{
4697 int tmp_result, result = 0;
4698
dd5a6fe2 4699 if (PP_CAP(PHM_PlatformCaps_ThermalController))
4700 vega10_disable_thermal_protection(hwmgr);
4701
4702 tmp_result = vega10_disable_power_containment(hwmgr);
4703 PP_ASSERT_WITH_CODE((tmp_result == 0),
4704 "Failed to disable power containment!", result = tmp_result);
4705
4706 tmp_result = vega10_disable_didt_config(hwmgr);
4707 PP_ASSERT_WITH_CODE((tmp_result == 0),
4708 "Failed to disable didt config!", result = tmp_result);
4709
4710 tmp_result = vega10_avfs_enable(hwmgr, false);
4711 PP_ASSERT_WITH_CODE((tmp_result == 0),
4712 "Failed to disable AVFS!", result = tmp_result);
4713
4714 tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4715 PP_ASSERT_WITH_CODE((tmp_result == 0),
4716 "Failed to stop DPM!", result = tmp_result);
4717
4718 tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
4719 PP_ASSERT_WITH_CODE((tmp_result == 0),
4720 "Failed to disable deep sleep!", result = tmp_result);
4721
4722 tmp_result = vega10_disable_ulv(hwmgr);
4723 PP_ASSERT_WITH_CODE((tmp_result == 0),
4724 "Failed to disable ulv!", result = tmp_result);
4725
4726 tmp_result = vega10_acg_disable(hwmgr);
4727 PP_ASSERT_WITH_CODE((tmp_result == 0),
4728 "Failed to disable acg!", result = tmp_result);
4729
4730 vega10_enable_disable_PCC_limit_feature(hwmgr, false);
4731 return result;
4732}
4733
4734static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4735{
690dc626 4736 struct vega10_hwmgr *data = hwmgr->backend;
4737 int result;
4738
4739 result = vega10_disable_dpm_tasks(hwmgr);
4740 PP_ASSERT_WITH_CODE((0 == result),
4741 "[disable_dpm_tasks] Failed to disable DPM!",
4742 );
4743 data->water_marks_bitmap &= ~(WaterMarksLoaded);
4744
4745 return result;
4746}
4747
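/*
 * Report how far the current top sclk DPM level sits above the golden
 * (default) one, as a rounded-up percentage; e.g. a 1680 MHz top level
 * over a 1600 MHz default reads back as 5.
 */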
4748static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4749{
690dc626 4750 struct vega10_hwmgr *data = hwmgr->backend;
4751 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4752 struct vega10_single_dpm_table *golden_sclk_table =
4753 &(data->golden_dpm_table.gfx_table);
4754 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4755 int golden_value = golden_sclk_table->dpm_levels
4756 [golden_sclk_table->count - 1].value;
4757
4758 value -= golden_value;
4759 value = DIV_ROUND_UP(value * 100, golden_value);
4760
4761 return value;
4762}
4763
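/*
 * Raise the top gfx clock of the requested power state by the given
 * overdrive percentage of the golden level, clamping to the VBIOS
 * engine clock limit and warning when the clamp kicks in.
 */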
4764static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4765{
690dc626 4766 struct vega10_hwmgr *data = hwmgr->backend;
4767 struct vega10_single_dpm_table *golden_sclk_table =
4768 &(data->golden_dpm_table.gfx_table);
4769 struct pp_power_state *ps;
4770 struct vega10_power_state *vega10_ps;
4771
4772 ps = hwmgr->request_ps;
4773
4774 if (ps == NULL)
4775 return -EINVAL;
4776
4777 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4778
4779 vega10_ps->performance_levels
4780 [vega10_ps->performance_level_count - 1].gfx_clock =
4781 golden_sclk_table->dpm_levels
4782 [golden_sclk_table->count - 1].value *
4783 value / 100 +
4784 golden_sclk_table->dpm_levels
4785 [golden_sclk_table->count - 1].value;
4786
4787 if (vega10_ps->performance_levels
4788 [vega10_ps->performance_level_count - 1].gfx_clock >
a4c3f247 4789 hwmgr->platform_descriptor.overdriveLimit.engineClock) {
4790 vega10_ps->performance_levels
4791 [vega10_ps->performance_level_count - 1].gfx_clock =
4792 hwmgr->platform_descriptor.overdriveLimit.engineClock;
4793 pr_warn("max sclk supported by vbios is %d\n",
4794 hwmgr->platform_descriptor.overdriveLimit.engineClock);
4795 }
4796 return 0;
4797}
4798
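/* Memory-clock counterparts of the two sclk overdrive helpers above. */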
4799static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4800{
690dc626 4801 struct vega10_hwmgr *data = hwmgr->backend;
4802 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4803 struct vega10_single_dpm_table *golden_mclk_table =
4804 &(data->golden_dpm_table.mem_table);
4805 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
4806 int golden_value = golden_mclk_table->dpm_levels
4807 [golden_mclk_table->count - 1].value;
4808
4809 value -= golden_value;
4810 value = DIV_ROUND_UP(value * 100, golden_value);
4811
4812 return value;
4813}
4814
4815static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4816{
690dc626 4817 struct vega10_hwmgr *data = hwmgr->backend;
4818 struct vega10_single_dpm_table *golden_mclk_table =
4819 &(data->golden_dpm_table.mem_table);
4820 struct pp_power_state *ps;
4821 struct vega10_power_state *vega10_ps;
4822
4823 ps = hwmgr->request_ps;
4824
4825 if (ps == NULL)
4826 return -EINVAL;
4827
4828 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4829
4830 vega10_ps->performance_levels
4831 [vega10_ps->performance_level_count - 1].mem_clock =
4832 golden_mclk_table->dpm_levels
4833 [golden_mclk_table->count - 1].value *
4834 value / 100 +
4835 golden_mclk_table->dpm_levels
4836 [golden_mclk_table->count - 1].value;
4837
4838 if (vega10_ps->performance_levels
4839 [vega10_ps->performance_level_count - 1].mem_clock >
a4c3f247 4840 hwmgr->platform_descriptor.overdriveLimit.memoryClock) {
4841 vega10_ps->performance_levels
4842 [vega10_ps->performance_level_count - 1].mem_clock =
4843 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
4844 pr_warn("max mclk supported by vbios is %d\n",
4845 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
4846 }
4847
4848 return 0;
4849}
8b9242ed 4850
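/*
 * Pass the location of the CAC/DRAM-log buffer to the SMU: the system
 * virtual address, the MC address and the buffer size each go down in a
 * separate message.
 */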
4851static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4852 uint32_t virtual_addr_low,
4853 uint32_t virtual_addr_hi,
4854 uint32_t mc_addr_low,
4855 uint32_t mc_addr_hi,
4856 uint32_t size)
4857{
4858 smum_send_msg_to_smc_with_parameter(hwmgr,
4859 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
4860 virtual_addr_hi);
4861 smum_send_msg_to_smc_with_parameter(hwmgr,
4862 PPSMC_MSG_SetSystemVirtualDramAddrLow,
4863 virtual_addr_low);
4864 smum_send_msg_to_smc_with_parameter(hwmgr,
4865 PPSMC_MSG_DramLogSetDramAddrHigh,
4866 mc_addr_hi);
4867
4868 smum_send_msg_to_smc_with_parameter(hwmgr,
4869 PPSMC_MSG_DramLogSetDramAddrLow,
4870 mc_addr_low);
4871
4872 smum_send_msg_to_smc_with_parameter(hwmgr,
4873 PPSMC_MSG_DramLogSetDramSize,
4874 size);
4875 return 0;
4876}
4877
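/*
 * Fill in the thermal trip points: the defaults come from
 * SMU7ThermalWithDelayPolicy, then the edge, hotspot and HBM limits from
 * the SMU pp_table (plus their CTF offsets for the emergency values)
 * override the maximums, scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */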
4878static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4879 struct PP_TemperatureRange *thermal_data)
4880{
4881 struct vega10_hwmgr *data = hwmgr->backend;
4882 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
4883
4884 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4885
4886 thermal_data->max = pp_table->TedgeLimit *
4887 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4888 thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
4889 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4890 thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
4891 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4892 thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
4893 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
437ccd17 4894 thermal_data->mem_crit_max = pp_table->ThbmLimit *
0a91ee07 4895 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4896 thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
4897 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4898
4899 return 0;
4900}
4901
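/*
 * Print one line per power profile with its heuristics (busy set point,
 * FPS, use_rlc_busy, min_active_level), plus the user-programmed CUSTOM
 * entry, marking the currently active mode with '*'.
 */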
4902static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4903{
690dc626 4904 struct vega10_hwmgr *data = hwmgr->backend;
6390258a 4905 uint32_t i, size = 0;
4906 static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
4907 {70, 60, 1, 3,},
4908 {90, 60, 0, 0,},
4909 {70, 60, 0, 0,},
4910 {70, 90, 0, 0,},
4911 {30, 60, 0, 6,},
4912 };
4913 static const char *profile_name[7] = {"BOOTUP_DEFAULT",
4914 "3D_FULL_SCREEN",
4915 "POWER_SAVING",
4916 "VIDEO",
4917 "VR",
04f618eb 4918 "COMPUTE",
4919 "CUSTOM"};
4920 static const char *title[6] = {"NUM",
4921 "MODE_NAME",
4922 "BUSY_SET_POINT",
4923 "FPS",
4924 "USE_RLC_BUSY",
4925 "MIN_ACTIVE_LEVEL"};
4926
4927 if (!buf)
4928 return -EINVAL;
4929
4930 	size += sprintf(buf + size, "%s %16s %s %s %s %s\n", title[0],
4931 title[1], title[2], title[3], title[4], title[5]);
4932
4933 for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
4934 size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
4935 i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4936 profile_mode_setting[i][0], profile_mode_setting[i][1],
4937 profile_mode_setting[i][2], profile_mode_setting[i][3]);
4938 size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
4939 profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4940 data->custom_profile_mode[0], data->custom_profile_mode[1],
4941 data->custom_profile_mode[2], data->custom_profile_mode[3]);
4942 return size;
4943}
4944
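/*
 * Switch power profiles. The requested mode arrives in input[size]; for
 * the CUSTOM profile, input[0..3] carry busy_set_point, FPS, use_rlc_busy
 * and min_active_level, which are packed into a single
 * SetCustomGfxDpmParameters message. The SMU workload mask is updated for
 * every mode.
 */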
4945static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4946{
690dc626 4947 struct vega10_hwmgr *data = hwmgr->backend;
4948 uint8_t busy_set_point;
4949 uint8_t FPS;
4950 uint8_t use_rlc_busy;
4951 uint8_t min_active_level;
e815a9e6 4952 uint32_t power_profile_mode = input[size];
6390258a 4953
e815a9e6 4954 if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
bbdf38cc 4955 if (size != 0 && size != 4)
4956 return -EINVAL;
4957
4958 /* If size = 0 and the CUSTOM profile has been set already
4959 * then just apply the profile. The copy stored in the hwmgr
4960 * is zeroed out on init
4961 */
4962 if (size == 0) {
4963 if (data->custom_profile_mode[0] != 0)
4964 goto out;
4965 else
4966 return -EINVAL;
4967 }
4968
4969 data->custom_profile_mode[0] = busy_set_point = input[0];
4970 data->custom_profile_mode[1] = FPS = input[1];
4971 data->custom_profile_mode[2] = use_rlc_busy = input[2];
4972 data->custom_profile_mode[3] = min_active_level = input[3];
4973 smum_send_msg_to_smc_with_parameter(hwmgr,
4974 PPSMC_MSG_SetCustomGfxDpmParameters,
4975 busy_set_point | FPS<<8 |
4976 use_rlc_busy << 16 | min_active_level<<24);
4977 }
4978
bbdf38cc 4979out:
4980 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
4981 1 << power_profile_mode);
4982 hwmgr->power_profile_mode = power_profile_mode;
4983
4984 return 0;
4985}
4986
4987
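/*
 * Validate a user-supplied overdrive clock/voltage pair: the voltage must
 * lie within [min_vddc, max_vddc] and the clock between the lowest golden
 * DPM level and the VBIOS overdrive limit of the table being edited.
 */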
4988static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4989 enum PP_OD_DPM_TABLE_COMMAND type,
4990 uint32_t clk,
4991 uint32_t voltage)
4992{
4993 struct vega10_hwmgr *data = hwmgr->backend;
4994 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4995 struct vega10_single_dpm_table *golden_table;
4996
4997 if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) {
4998 pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc);
4999 return false;
5000 }
5001
5002 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
5003 golden_table = &(data->golden_dpm_table.gfx_table);
5004 if (golden_table->dpm_levels[0].value > clk ||
5005 hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
5006 pr_info("OD engine clock is out of range [%d - %d] MHz\n",
5007 golden_table->dpm_levels[0].value/100,
5008 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
5009 return false;
5010 }
5011 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
5012 golden_table = &(data->golden_dpm_table.mem_table);
5013 if (golden_table->dpm_levels[0].value > clk ||
5014 hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
5015 pr_info("OD memory clock is out of range [%d - %d] MHz\n",
5016 golden_table->dpm_levels[0].value/100,
5017 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
5018 return false;
5019 }
5020 } else {
5021 return false;
5022 }
5023
5024 return true;
5025}
5026
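/*
 * After an overdrive edit, copy the new top gfx/soc/mem clocks from the
 * DPM tables into the highest performance level of the requested power
 * state and of the last entry in the power state array.
 */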
5027static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
5028{
5029 struct vega10_hwmgr *data = hwmgr->backend;
5030 struct pp_power_state *ps = hwmgr->request_ps;
5031 struct vega10_power_state *vega10_ps;
5032 struct vega10_single_dpm_table *gfx_dpm_table =
5033 &data->dpm_table.gfx_table;
5034 struct vega10_single_dpm_table *soc_dpm_table =
5035 &data->dpm_table.soc_table;
5036 struct vega10_single_dpm_table *mem_dpm_table =
5037 &data->dpm_table.mem_table;
5038 int max_level;
5039
5040 if (!ps)
5041 return;
5042
5043 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
5044 max_level = vega10_ps->performance_level_count - 1;
5045
5046 if (vega10_ps->performance_levels[max_level].gfx_clock !=
5047 gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
5048 vega10_ps->performance_levels[max_level].gfx_clock =
5049 gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
5050
5051 if (vega10_ps->performance_levels[max_level].soc_clock !=
5052 soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
5053 vega10_ps->performance_levels[max_level].soc_clock =
5054 soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
5055
5056 if (vega10_ps->performance_levels[max_level].mem_clock !=
5057 mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
5058 vega10_ps->performance_levels[max_level].mem_clock =
5059 mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
5060
5061 if (!hwmgr->ps)
5062 return;
5063
5064 ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
5065 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
5066 max_level = vega10_ps->performance_level_count - 1;
5067
5068 if (vega10_ps->performance_levels[max_level].gfx_clock !=
5069 gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
5070 vega10_ps->performance_levels[max_level].gfx_clock =
5071 gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
5072
5073 if (vega10_ps->performance_levels[max_level].soc_clock !=
5074 soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
5075 vega10_ps->performance_levels[max_level].soc_clock =
5076 soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
5077
5078 if (vega10_ps->performance_levels[max_level].mem_clock !=
5079 mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
5080 vega10_ps->performance_levels[max_level].mem_clock =
5081 mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
5082}
5083
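/*
 * Keep the ODN voltage lookup table and the SOC clock dependency table
 * consistent with the edited sclk/mclk tables and flag which DPM tables
 * need to be regenerated for the SMU.
 */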
5084static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
5085 enum PP_OD_DPM_TABLE_COMMAND type)
5086{
5087 struct vega10_hwmgr *data = hwmgr->backend;
5088 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
5089 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
7d59c41b 5090 struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.mem_table;
5091
5092 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
5093 &data->odn_dpm_table.vdd_dep_on_socclk;
5094 struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table;
5095
5096 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep;
5097 uint8_t i, j;
5098
5099 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
5100 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
5101 for (i = 0; i < podn_vdd_dep->count - 1; i++)
5102 od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
5103 if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
5104 od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
5105 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
5106 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
5107 for (i = 0; i < dpm_table->count; i++) {
5108 for (j = 0; j < od_vddc_lookup_table->count; j++) {
5109 if (od_vddc_lookup_table->entries[j].us_vdd >
5110 podn_vdd_dep->entries[i].vddc)
5111 break;
5112 }
5113 if (j == od_vddc_lookup_table->count) {
5114 j = od_vddc_lookup_table->count - 1;
5115 od_vddc_lookup_table->entries[j].us_vdd =
5116 podn_vdd_dep->entries[i].vddc;
5117 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
5118 }
5119 podn_vdd_dep->entries[i].vddInd = j;
5120 }
5121 dpm_table = &data->dpm_table.soc_table;
5122 for (i = 0; i < dep_table->count; i++) {
5123 if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[podn_vdd_dep->count-1].vddInd &&
5124 dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count-1].clk) {
c5a44849 5125 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
5126 for (; (i < dep_table->count) &&
5127 (dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk); i++) {
5128 podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[podn_vdd_dep->count-1].clk;
5129 dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
5130 }
5131 break;
5132 } else {
5133 dpm_table->dpm_levels[i].value = dep_table->entries[i].clk;
5134 podn_vdd_dep_on_socclk->entries[i].vddc = dep_table->entries[i].vddc;
5135 podn_vdd_dep_on_socclk->entries[i].vddInd = dep_table->entries[i].vddInd;
5136 podn_vdd_dep_on_socclk->entries[i].clk = dep_table->entries[i].clk;
5137 }
5138 }
5139 if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
7d59c41b 5140 podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk) {
c5a44849 5141 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
5142 podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk =
5143 podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
5144 dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value =
5145 podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
5146 }
5147 if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
7d59c41b 5148 podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd) {
c5a44849 5149 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
5150 podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd =
5151 podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd;
5152 }
5153 }
fc99f2be 5154 vega10_odn_update_power_state(hwmgr);
5155}
5156
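/*
 * Handle overdrive table edits: 'input' carries (level, clock in MHz,
 * voltage in mV) triplets for the sclk or mclk table, while RESTORE puts
 * the golden tables back and COMMIT makes the pending changes take effect.
 */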
5157static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
5158 enum PP_OD_DPM_TABLE_COMMAND type,
5159 long *input, uint32_t size)
5160{
5161 struct vega10_hwmgr *data = hwmgr->backend;
5162 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table;
5163 struct vega10_single_dpm_table *dpm_table;
5164
5165 uint32_t input_clk;
5166 uint32_t input_vol;
5167 uint32_t input_level;
5168 uint32_t i;
5169
5170 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
5171 return -EINVAL);
5172
5173 if (!hwmgr->od_enabled) {
5174 pr_info("OverDrive feature not enabled\n");
5175 return -EINVAL;
5176 }
5177
5178 if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
5179 dpm_table = &data->dpm_table.gfx_table;
5180 podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk;
5181 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
5182 } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
5183 dpm_table = &data->dpm_table.mem_table;
5184 podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk;
5185 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
5186 } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
5187 memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
5188 vega10_odn_initial_default_setting(hwmgr);
fc99f2be 5189 vega10_odn_update_power_state(hwmgr);
5190 /* force to update all clock tables */
5191 data->need_update_dpm_table = DPMTABLE_UPDATE_SCLK |
5192 DPMTABLE_UPDATE_MCLK |
5193 DPMTABLE_UPDATE_SOCCLK;
5194 return 0;
5195 } else if (PP_OD_COMMIT_DPM_TABLE == type) {
5196 vega10_check_dpm_table_updated(hwmgr);
5197 return 0;
5198 } else {
5199 return -EINVAL;
5200 }
5201
5202 for (i = 0; i < size; i += 3) {
5203 if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) {
5204 pr_info("invalid clock voltage input\n");
5205 return 0;
5206 }
5207 input_level = input[i];
5208 input_clk = input[i+1] * 100;
5209 input_vol = input[i+2];
5210
5211 if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
5212 dpm_table->dpm_levels[input_level].value = input_clk;
5213 podn_vdd_dep_table->entries[input_level].clk = input_clk;
5214 podn_vdd_dep_table->entries[input_level].vddc = input_vol;
5215 } else {
5216 return -EINVAL;
5217 }
5218 }
5219 vega10_odn_update_soc_table(hwmgr, type);
5220 return 0;
5221}
5222
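/*
 * Only PP_MP1_STATE_UNLOAD requires action here: ask the SMU to prepare
 * MP1 for driver unload. The other MP1 states are no-ops on this ASIC.
 */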
5223static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
5224 enum pp_mp1_state mp1_state)
5225{
5226 uint16_t msg;
5227 int ret;
5228
5229 switch (mp1_state) {
5230 case PP_MP1_STATE_UNLOAD:
5231 msg = PPSMC_MSG_PrepareMp1ForUnload;
5232 break;
5233 case PP_MP1_STATE_SHUTDOWN:
5234 case PP_MP1_STATE_RESET:
5235 case PP_MP1_STATE_NONE:
5236 default:
5237 return 0;
5238 }
5239
5240 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
5241 "[PrepareMp1] Failed!",
5242 return ret);
5243
5244 return 0;
5245}
5246
5247static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
5248 PHM_PerformanceLevelDesignation designation, uint32_t index,
5249 PHM_PerformanceLevel *level)
5250{
5251 const struct vega10_power_state *ps;
5252 struct vega10_hwmgr *data;
5253 uint32_t i;
5254
5255 if (level == NULL || hwmgr == NULL || state == NULL)
5256 return -EINVAL;
5257
5258 data = hwmgr->backend;
5259 ps = cast_const_phw_vega10_power_state(state);
5260
5261 i = index > ps->performance_level_count - 1 ?
5262 ps->performance_level_count - 1 : index;
5263
5264 level->coreClock = ps->performance_levels[i].gfx_clock;
5265 level->memory_clock = ps->performance_levels[i].mem_clock;
5266
5267 return 0;
5268}
5269
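/*
 * hwmgr callback table: the powerplay core dispatches into the Vega10
 * code through these hooks.
 */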
5270static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
5271 .backend_init = vega10_hwmgr_backend_init,
5272 .backend_fini = vega10_hwmgr_backend_fini,
5273 .asic_setup = vega10_setup_asic_task,
5274 .dynamic_state_management_enable = vega10_enable_dpm_tasks,
8b9242ed 5275 .dynamic_state_management_disable = vega10_disable_dpm_tasks,
5276 .get_num_of_pp_table_entries =
5277 vega10_get_number_of_powerplay_table_entries,
5278 .get_power_state_size = vega10_get_power_state_size,
5279 .get_pp_table_entry = vega10_get_pp_table_entry,
5280 .patch_boot_state = vega10_patch_boot_state,
5281 .apply_state_adjust_rules = vega10_apply_state_adjust_rules,
5282 .power_state_set = vega10_set_power_state_tasks,
5283 .get_sclk = vega10_dpm_get_sclk,
5284 .get_mclk = vega10_dpm_get_mclk,
5285 .notify_smc_display_config_after_ps_adjustment =
5286 vega10_notify_smc_display_config_after_ps_adjustment,
5287 .force_dpm_level = vega10_dpm_force_dpm_level,
5288 .stop_thermal_controller = vega10_thermal_stop_thermal_controller,
5289 .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
5290 .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
5291 .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
5292 .reset_fan_speed_to_default =
5293 vega10_fan_ctrl_reset_fan_speed_to_default,
5294 .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
5295 .set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
5296 .uninitialize_thermal_controller =
5297 vega10_thermal_ctrl_uninitialize_thermal_controller,
5298 .set_fan_control_mode = vega10_set_fan_control_mode,
5299 .get_fan_control_mode = vega10_get_fan_control_mode,
5300 .read_sensor = vega10_read_sensor,
5301 .get_dal_power_level = vega10_get_dal_power_level,
5302 .get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
5303 .get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
5304 .set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
5305 .display_clock_voltage_request = vega10_display_clock_voltage_request,
5306 .force_clock_level = vega10_force_clock_level,
5307 .print_clock_levels = vega10_print_clock_levels,
5308 .display_config_changed = vega10_display_configuration_changed_task,
5309 .powergate_uvd = vega10_power_gate_uvd,
5310 .powergate_vce = vega10_power_gate_vce,
5311 .check_states_equal = vega10_check_states_equal,
5312 .check_smc_update_required_for_display_configuration =
5313 vega10_check_smc_update_required_for_display_configuration,
5314 .power_off_asic = vega10_power_off_asic,
5315 .disable_smc_firmware_ctf = vega10_thermal_disable_alert,
5316 .get_sclk_od = vega10_get_sclk_od,
5317 .set_sclk_od = vega10_set_sclk_od,
5318 .get_mclk_od = vega10_get_mclk_od,
5319 .set_mclk_od = vega10_set_mclk_od,
9d90f0bd 5320 .avfs_control = vega10_avfs_enable,
52afb85e 5321 .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
0a91ee07 5322 .get_thermal_temperature_range = vega10_get_thermal_temperature_range,
4d200372 5323 .register_irq_handlers = smu9_register_irq_handlers,
1ed05ff4 5324 .start_thermal_controller = vega10_start_thermal_controller,
5325 .get_power_profile_mode = vega10_get_power_profile_mode,
5326 .set_power_profile_mode = vega10_set_power_profile_mode,
6ab8555e 5327 .set_power_limit = vega10_set_power_limit,
c5a44849 5328 .odn_edit_dpm_table = vega10_odn_edit_dpm_table,
f688b614 5329 .get_performance_level = vega10_get_performance_level,
5330 .get_asic_baco_capability = smu9_baco_get_capability,
5331 .get_asic_baco_state = smu9_baco_get_state,
425db255 5332 .set_asic_baco_state = vega10_baco_set_state,
713b64a5 5333 .enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
5334 .get_ppfeature_status = vega10_get_ppfeature_status,
5335 .set_ppfeature_status = vega10_set_ppfeature_status,
e254102d 5336 .set_mp1_state = vega10_set_mp1_state,
5337};
5338
5339int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
5340{
5341 struct amdgpu_device *adev = hwmgr->adev;
5342
5343 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
5344 hwmgr->pptable_func = &vega10_pptable_funcs;
5345 if (amdgpu_passthrough(adev))
5346 return vega10_baco_set_cap(hwmgr);
1ab47204 5347
5348 return 0;
5349}