drm/amd/pp: Move same macro definitions to hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/fb.h>
26#include <linux/module.h>
27#include <linux/slab.h>
28
29#include "hwmgr.h"
30#include "amd_powerplay.h"
31#include "hardwaremanager.h"
32#include "ppatomfwctrl.h"
33#include "atomfirmware.h"
34#include "cgs_common.h"
35#include "vega10_powertune.h"
36#include "smu9.h"
37#include "smu9_driver_if.h"
38#include "vega10_inc.h"
39#include "soc15_common.h"
40#include "pppcielanes.h"
41#include "vega10_hwmgr.h"
42#include "vega10_processpptables.h"
43#include "vega10_pptable.h"
44#include "vega10_thermal.h"
45#include "pp_debug.h"
46#include "amd_pcie_helpers.h"
47#include "ppinterrupt.h"
48#include "pp_overdriver.h"
49#include "pp_thermal.h"
50
51#include "smuio/smuio_9_0_offset.h"
52#include "smuio/smuio_9_0_sh_mask.h"
53
54#define HBM_MEMORY_CHANNEL_WIDTH 128
55
56static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
57
58#define MEM_FREQ_LOW_LATENCY 25000
59#define MEM_FREQ_HIGH_LATENCY 80000
60#define MEM_LATENCY_HIGH 245
61#define MEM_LATENCY_LOW 35
62#define MEM_LATENCY_ERR 0xFFFF
63
64#define mmDF_CS_AON0_DramBaseAddress0 0x0044
65#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
66
67//DF_CS_AON0_DramBaseAddress0
68#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
69#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
70#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
71#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
72#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
73#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
74#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
75#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
76#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
77#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
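/*
 * Added note (illustrative, not from the original source): these DF register
 * fields are used later in vega10_hwmgr_backend_init() to derive the HBM
 * channel configuration, roughly:
 *
 *   reg = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
 *   idx = (reg & DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
 *         DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
 *
 * The idx value is range-checked against channel_number[] above; for example
 * an interleave field of 3 corresponds to channel_number[3] == 4 channels.
 */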
78static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
79 enum pp_clock_type type, uint32_t mask);
80
81static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
82
83struct vega10_power_state *cast_phw_vega10_power_state(
84 struct pp_hw_power_state *hw_ps)
85{
86 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
87 "Invalid Powerstate Type!",
88 return NULL;);
89
90 return (struct vega10_power_state *)hw_ps;
91}
92
93const struct vega10_power_state *cast_const_phw_vega10_power_state(
94 const struct pp_hw_power_state *hw_ps)
95{
96 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
97 "Invalid Powerstate Type!",
98 return NULL;);
99
100 return (const struct vega10_power_state *)hw_ps;
101}
102
103static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
104{
105 struct vega10_hwmgr *data = hwmgr->backend;
106
107 data->registry_data.sclk_dpm_key_disabled =
108 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
109 data->registry_data.socclk_dpm_key_disabled =
110 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
111 data->registry_data.mclk_dpm_key_disabled =
112 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
113 data->registry_data.pcie_dpm_key_disabled =
114 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
115
116 data->registry_data.dcefclk_dpm_key_disabled =
117 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
118
119 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
120 data->registry_data.power_containment_support = 1;
121 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
122 data->registry_data.enable_tdc_limit_feature = 1;
123 }
124
125 data->registry_data.clock_stretcher_support =
126 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
127
128 data->registry_data.ulv_support =
129 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
130
131 data->registry_data.sclk_deep_sleep_support =
132 hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
133
134 data->registry_data.disable_water_mark = 0;
135
136 data->registry_data.fan_control_support = 1;
137 data->registry_data.thermal_support = 1;
138 data->registry_data.fw_ctf_enabled = 1;
139
140 data->registry_data.avfs_support = 1;
141 data->registry_data.led_dpm_enabled = 1;
142
143 data->registry_data.vr0hot_enabled = 1;
144 data->registry_data.vr1hot_enabled = 1;
145 data->registry_data.regulator_hot_gpio_support = 1;
146
147 data->registry_data.didt_support = 1;
148 if (data->registry_data.didt_support) {
149 data->registry_data.didt_mode = 6;
150 data->registry_data.sq_ramping_support = 1;
151 data->registry_data.db_ramping_support = 0;
152 data->registry_data.td_ramping_support = 0;
153 data->registry_data.tcp_ramping_support = 0;
154 data->registry_data.dbr_ramping_support = 0;
155 data->registry_data.edc_didt_support = 1;
156 data->registry_data.gc_didt_support = 0;
157 data->registry_data.psm_didt_support = 0;
158 }
159
160 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
161 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
162 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
163 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
164 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
165 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
166 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
167 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
168 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
169 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
170 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
171 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
172 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
173
174 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
175 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
176 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
177 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
178}
179
180static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
181{
182 struct vega10_hwmgr *data = hwmgr->backend;
183 struct phm_ppt_v2_information *table_info =
184 (struct phm_ppt_v2_information *)hwmgr->pptable;
185 struct amdgpu_device *adev = hwmgr->adev;
186
187 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
188 PHM_PlatformCaps_SclkDeepSleep);
189
190 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
191 PHM_PlatformCaps_DynamicPatchPowerState);
192
193 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
194 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
195 PHM_PlatformCaps_ControlVDDCI);
196
197 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
198 PHM_PlatformCaps_EnableSMU7ThermalManagement);
199
200 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
201 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
202 PHM_PlatformCaps_UVDPowerGating);
203
204 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
205 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
206 PHM_PlatformCaps_VCEPowerGating);
207
208 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
209 PHM_PlatformCaps_UnTabledHardwareInterface);
210
211 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
212 PHM_PlatformCaps_FanSpeedInTableIsRPM);
213
214 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_ODFuzzyFanControlSupport);
216
217 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
218 PHM_PlatformCaps_DynamicPowerManagement);
219
220 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
221 PHM_PlatformCaps_SMC);
222
223 /* power tune caps */
224 /* assume disabled */
225 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
226 PHM_PlatformCaps_PowerContainment);
227 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_DiDtSupport);
229 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
230 PHM_PlatformCaps_SQRamping);
231 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
232 PHM_PlatformCaps_DBRamping);
233 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
234 PHM_PlatformCaps_TDRamping);
235 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
236 PHM_PlatformCaps_TCPRamping);
237 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
238 PHM_PlatformCaps_DBRRamping);
239 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
240 PHM_PlatformCaps_DiDtEDCEnable);
241 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
242 PHM_PlatformCaps_GCEDC);
243 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
244 PHM_PlatformCaps_PSM);
245
246 if (data->registry_data.didt_support) {
247 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
248 if (data->registry_data.sq_ramping_support)
249 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
250 if (data->registry_data.db_ramping_support)
251 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
252 if (data->registry_data.td_ramping_support)
253 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
254 if (data->registry_data.tcp_ramping_support)
255 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
256 if (data->registry_data.dbr_ramping_support)
257 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
258 if (data->registry_data.edc_didt_support)
259 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
260 if (data->registry_data.gc_didt_support)
261 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
262 if (data->registry_data.psm_didt_support)
263 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
264 }
265
266 if (data->registry_data.power_containment_support)
267 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
268 PHM_PlatformCaps_PowerContainment);
269 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
270 PHM_PlatformCaps_CAC);
271
272 if (table_info->tdp_table->usClockStretchAmount &&
273 data->registry_data.clock_stretcher_support)
274 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
275 PHM_PlatformCaps_ClockStretcher);
276
277 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
278 PHM_PlatformCaps_RegulatorHot);
279 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
280 PHM_PlatformCaps_AutomaticDCTransition);
281
282 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
283 PHM_PlatformCaps_UVDDPM);
284 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
285 PHM_PlatformCaps_VCEDPM);
286
287 return 0;
288}
289
290static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
291{
292 struct vega10_hwmgr *data = hwmgr->backend;
293 int i;
294 uint32_t sub_vendor_id, hw_revision;
295 struct amdgpu_device *adev = hwmgr->adev;
296
297 vega10_initialize_power_tune_defaults(hwmgr);
298
299 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
300 data->smu_features[i].smu_feature_id = 0xffff;
301 data->smu_features[i].smu_feature_bitmap = 1 << i;
302 data->smu_features[i].enabled = false;
303 data->smu_features[i].supported = false;
304 }
305
306 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
307 FEATURE_DPM_PREFETCHER_BIT;
308 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
309 FEATURE_DPM_GFXCLK_BIT;
310 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
311 FEATURE_DPM_UCLK_BIT;
312 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
313 FEATURE_DPM_SOCCLK_BIT;
314 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
315 FEATURE_DPM_UVD_BIT;
316 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
317 FEATURE_DPM_VCE_BIT;
318 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
319 FEATURE_DPM_MP0CLK_BIT;
320 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
321 FEATURE_DPM_LINK_BIT;
322 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
323 FEATURE_DPM_DCEFCLK_BIT;
324 data->smu_features[GNLD_ULV].smu_feature_id =
325 FEATURE_ULV_BIT;
326 data->smu_features[GNLD_AVFS].smu_feature_id =
327 FEATURE_AVFS_BIT;
328 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
329 FEATURE_DS_GFXCLK_BIT;
330 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
331 FEATURE_DS_SOCCLK_BIT;
332 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
333 FEATURE_DS_LCLK_BIT;
334 data->smu_features[GNLD_PPT].smu_feature_id =
335 FEATURE_PPT_BIT;
336 data->smu_features[GNLD_TDC].smu_feature_id =
337 FEATURE_TDC_BIT;
338 data->smu_features[GNLD_THERMAL].smu_feature_id =
339 FEATURE_THERMAL_BIT;
340 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
341 FEATURE_GFX_PER_CU_CG_BIT;
342 data->smu_features[GNLD_RM].smu_feature_id =
343 FEATURE_RM_BIT;
344 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
345 FEATURE_DS_DCEFCLK_BIT;
346 data->smu_features[GNLD_ACDC].smu_feature_id =
347 FEATURE_ACDC_BIT;
348 data->smu_features[GNLD_VR0HOT].smu_feature_id =
349 FEATURE_VR0HOT_BIT;
350 data->smu_features[GNLD_VR1HOT].smu_feature_id =
351 FEATURE_VR1HOT_BIT;
352 data->smu_features[GNLD_FW_CTF].smu_feature_id =
353 FEATURE_FW_CTF_BIT;
354 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
355 FEATURE_LED_DISPLAY_BIT;
356 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
357 FEATURE_FAN_CONTROL_BIT;
358 data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
359 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
360 data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;
361
362 if (!data->registry_data.prefetcher_dpm_key_disabled)
363 data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
364
365 if (!data->registry_data.sclk_dpm_key_disabled)
366 data->smu_features[GNLD_DPM_GFXCLK].supported = true;
367
368 if (!data->registry_data.mclk_dpm_key_disabled)
369 data->smu_features[GNLD_DPM_UCLK].supported = true;
370
371 if (!data->registry_data.socclk_dpm_key_disabled)
372 data->smu_features[GNLD_DPM_SOCCLK].supported = true;
373
374 if (PP_CAP(PHM_PlatformCaps_UVDDPM))
375 data->smu_features[GNLD_DPM_UVD].supported = true;
376
377 if (PP_CAP(PHM_PlatformCaps_VCEDPM))
378 data->smu_features[GNLD_DPM_VCE].supported = true;
379
380 if (!data->registry_data.pcie_dpm_key_disabled)
381 data->smu_features[GNLD_DPM_LINK].supported = true;
382
383 if (!data->registry_data.dcefclk_dpm_key_disabled)
384 data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
385
386 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
387 data->registry_data.sclk_deep_sleep_support) {
388 data->smu_features[GNLD_DS_GFXCLK].supported = true;
389 data->smu_features[GNLD_DS_SOCCLK].supported = true;
390 data->smu_features[GNLD_DS_LCLK].supported = true;
391 data->smu_features[GNLD_DS_DCEFCLK].supported = true;
392 }
393
394 if (data->registry_data.enable_pkg_pwr_tracking_feature)
395 data->smu_features[GNLD_PPT].supported = true;
396
397 if (data->registry_data.enable_tdc_limit_feature)
398 data->smu_features[GNLD_TDC].supported = true;
399
400 if (data->registry_data.thermal_support)
401 data->smu_features[GNLD_THERMAL].supported = true;
402
403 if (data->registry_data.fan_control_support)
404 data->smu_features[GNLD_FAN_CONTROL].supported = true;
405
406 if (data->registry_data.fw_ctf_enabled)
407 data->smu_features[GNLD_FW_CTF].supported = true;
408
409 if (data->registry_data.avfs_support)
410 data->smu_features[GNLD_AVFS].supported = true;
411
412 if (data->registry_data.led_dpm_enabled)
413 data->smu_features[GNLD_LED_DISPLAY].supported = true;
414
415 if (data->registry_data.vr1hot_enabled)
416 data->smu_features[GNLD_VR1HOT].supported = true;
417
418 if (data->registry_data.vr0hot_enabled)
419 data->smu_features[GNLD_VR0HOT].supported = true;
420
421 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
422 hwmgr->smu_version = smum_get_argument(hwmgr);
423 /* ACG firmware has major version 5 */
424 if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
425 data->smu_features[GNLD_ACG].supported = true;
426
427 if (data->registry_data.didt_support)
428 data->smu_features[GNLD_DIDT].supported = true;
429
430 hw_revision = adev->pdev->revision;
431 sub_vendor_id = adev->pdev->subsystem_vendor;
432
433 if ((hwmgr->chip_id == 0x6862 ||
434 hwmgr->chip_id == 0x6861 ||
435 hwmgr->chip_id == 0x6868) &&
436 (hw_revision == 0) &&
437 (sub_vendor_id != 0x1002))
438 data->smu_features[GNLD_PCC_LIMIT].supported = true;
439}
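/*
 * Added note (an interpretation, not from the original source): the ACG check
 * above reads the top byte of hwmgr->smu_version as the SMU firmware major
 * version, so (smu_version & 0xff000000) == 0x5000000 is simply a test for
 * major version 5, matching the "ACG firmware has major version 5" comment.
 */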
440
441#ifdef PPLIB_VEGA10_EVV_SUPPORT
442static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
443 phm_ppt_v1_voltage_lookup_table *lookup_table,
444 uint16_t virtual_voltage_id, int32_t *socclk)
445{
446 uint8_t entry_id;
447 uint8_t voltage_id;
448 struct phm_ppt_v2_information *table_info =
449 (struct phm_ppt_v2_information *)(hwmgr->pptable);
450
451 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
452 "Lookup table is empty",
453 return -EINVAL);
454
455 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
456 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
457 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
458 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
459 break;
460 }
461
462 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
463 "Can't find requested voltage id in vdd_dep_on_socclk table!",
464 return -EINVAL);
465
466 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
467
468 return 0;
469}
470
471#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
472/**
473* Get Leakage VDDC based on leakage ID.
474*
475* @param hwmgr the address of the powerplay hardware manager.
476* @return always 0.
477*/
478static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
479{
480 struct vega10_hwmgr *data = hwmgr->backend;
481 uint16_t vv_id;
482 uint32_t vddc = 0;
483 uint16_t i, j;
484 uint32_t sclk = 0;
485 struct phm_ppt_v2_information *table_info =
486 (struct phm_ppt_v2_information *)hwmgr->pptable;
487 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
488 table_info->vdd_dep_on_socclk;
489 int result;
490
491 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
492 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
493
494 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
495 table_info->vddc_lookup_table, vv_id, &sclk)) {
496 if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
497 for (j = 1; j < socclk_table->count; j++) {
498 if (socclk_table->entries[j].clk == sclk &&
499 socclk_table->entries[j].cks_enable == 0) {
500 sclk += 5000;
501 break;
502 }
503 }
504 }
505
506 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
507 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
508 "Error retrieving EVV voltage value!",
509 continue);
510
511
512 /* need to make sure vddc is less than 2V, or else it could burn the ASIC. */
513 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
514 "Invalid VDDC value", result = -EINVAL;);
515
516 /* the voltage should not be zero nor equal to leakage ID */
517 if (vddc != 0 && vddc != vv_id) {
518 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
519 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
520 data->vddc_leakage.count++;
521 }
522 }
523 }
524
525 return 0;
526}
527
528/**
529 * Change virtual leakage voltage to actual value.
530 *
531 * @param hwmgr the address of the powerplay hardware manager.
532 * @param pointer to changing voltage
533 * @param pointer to leakage table
534 */
535static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
536 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
537{
538 uint32_t index;
539
540 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
541 for (index = 0; index < leakage_table->count; index++) {
542 /* if this voltage matches a leakage voltage ID */
543 /* patch with actual leakage voltage */
544 if (leakage_table->leakage_id[index] == *voltage) {
545 *voltage = leakage_table->actual_voltage[index];
546 break;
547 }
548 }
549
550 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
551 pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
552}
553
554/**
555* Patch voltage lookup table by EVV leakages.
556*
557* @param hwmgr the address of the powerplay hardware manager.
558* @param pointer to voltage lookup table
559* @param pointer to leakage table
560* @return always 0
561*/
562static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
563 phm_ppt_v1_voltage_lookup_table *lookup_table,
564 struct vega10_leakage_voltage *leakage_table)
565{
566 uint32_t i;
567
568 for (i = 0; i < lookup_table->count; i++)
569 vega10_patch_with_vdd_leakage(hwmgr,
570 &lookup_table->entries[i].us_vdd, leakage_table);
571
572 return 0;
573}
574
575static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
576 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
577 uint16_t *vddc)
578{
579 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
580
581 return 0;
582}
583#endif
584
585static int vega10_patch_voltage_dependency_tables_with_lookup_table(
586 struct pp_hwmgr *hwmgr)
587{
588 uint8_t entry_id, voltage_id;
589 unsigned i;
590 struct phm_ppt_v2_information *table_info =
591 (struct phm_ppt_v2_information *)(hwmgr->pptable);
592 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
593 table_info->mm_dep_table;
594 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
595 table_info->vdd_dep_on_mclk;
596
597 for (i = 0; i < 6; i++) {
598 struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
599 switch (i) {
600 case 0: vdt = table_info->vdd_dep_on_socclk; break;
601 case 1: vdt = table_info->vdd_dep_on_sclk; break;
602 case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
603 case 3: vdt = table_info->vdd_dep_on_pixclk; break;
604 case 4: vdt = table_info->vdd_dep_on_dispclk; break;
605 case 5: vdt = table_info->vdd_dep_on_phyclk; break;
606 }
607
608 for (entry_id = 0; entry_id < vdt->count; entry_id++) {
609 voltage_id = vdt->entries[entry_id].vddInd;
610 vdt->entries[entry_id].vddc =
611 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
612 }
613 }
614
615 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
616 voltage_id = mm_table->entries[entry_id].vddcInd;
617 mm_table->entries[entry_id].vddc =
618 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
619 }
620
621 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
622 voltage_id = mclk_table->entries[entry_id].vddInd;
623 mclk_table->entries[entry_id].vddc =
624 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
625 voltage_id = mclk_table->entries[entry_id].vddciInd;
626 mclk_table->entries[entry_id].vddci =
627 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
628 voltage_id = mclk_table->entries[entry_id].mvddInd;
629 mclk_table->entries[entry_id].mvdd =
630 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
631 }
632
633
634 return 0;
635
636}
637
638static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
639 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
640{
641 uint32_t table_size, i, j;
642 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
643
644 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
645 "Lookup table is empty", return -EINVAL);
646
647 table_size = lookup_table->count;
648
649 /* Sorting voltages */
650 for (i = 0; i < table_size - 1; i++) {
651 for (j = i + 1; j > 0; j--) {
652 if (lookup_table->entries[j].us_vdd <
653 lookup_table->entries[j - 1].us_vdd) {
654 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
655 lookup_table->entries[j - 1] = lookup_table->entries[j];
656 lookup_table->entries[j] = tmp_voltage_lookup_record;
657 }
658 }
659 }
660
661 return 0;
662}
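/*
 * Worked example (added for illustration): a lookup table with us_vdd values
 * {900, 850, 1000, 950} comes out of the insertion sort above as
 * {850, 900, 950, 1000}; entries are swapped as whole records, so any
 * per-entry data stays attached to its voltage.
 */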
663
664static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
665{
666 int result = 0;
667 int tmp_result;
668 struct phm_ppt_v2_information *table_info =
669 (struct phm_ppt_v2_information *)(hwmgr->pptable);
670#ifdef PPLIB_VEGA10_EVV_SUPPORT
671 struct vega10_hwmgr *data = hwmgr->backend;
672
673 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
674 table_info->vddc_lookup_table, &(data->vddc_leakage));
675 if (tmp_result)
676 result = tmp_result;
677
678 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
679 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
680 if (tmp_result)
681 result = tmp_result;
682#endif
683
684 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
685 if (tmp_result)
686 result = tmp_result;
687
688 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
689 if (tmp_result)
690 result = tmp_result;
691
692 return result;
693}
694
695static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
696{
697 struct phm_ppt_v2_information *table_info =
698 (struct phm_ppt_v2_information *)(hwmgr->pptable);
699 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
700 table_info->vdd_dep_on_socclk;
701 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
702 table_info->vdd_dep_on_mclk;
703
704 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
705 "VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
706 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
707 "VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);
708
709 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
710 "VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
711 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
712 "VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);
713
714 table_info->max_clock_voltage_on_ac.sclk =
715 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
716 table_info->max_clock_voltage_on_ac.mclk =
717 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
718 table_info->max_clock_voltage_on_ac.vddc =
719 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
720 table_info->max_clock_voltage_on_ac.vddci =
721 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
722
723 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
724 table_info->max_clock_voltage_on_ac.sclk;
725 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
726 table_info->max_clock_voltage_on_ac.mclk;
727 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
728 table_info->max_clock_voltage_on_ac.vddc;
729 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
730 table_info->max_clock_voltage_on_ac.vddci;
731
732 return 0;
733}
734
735static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
736{
737 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
738 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
739
740 kfree(hwmgr->backend);
741 hwmgr->backend = NULL;
742
743 return 0;
744}
745
746static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
747{
748 int result = 0;
749 struct vega10_hwmgr *data;
750 uint32_t config_telemetry = 0;
751 struct pp_atomfwctrl_voltage_table vol_table;
752 struct amdgpu_device *adev = hwmgr->adev;
753
754 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
755 if (data == NULL)
756 return -ENOMEM;
757
758 hwmgr->backend = data;
759
760 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
761 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
762 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
763
764 vega10_set_default_registry_data(hwmgr);
765 data->disable_dpm_mask = 0xff;
766
767 /* need to set voltage control types before EVV patching */
768 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
769 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
770 data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
771
772 /* VDDCR_SOC */
773 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
774 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
775 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
776 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
777 &vol_table)) {
778 config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
779 (vol_table.telemetry_offset & 0xff);
780 data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
781 }
782 } else {
783 kfree(hwmgr->backend);
784 hwmgr->backend = NULL;
785 PP_ASSERT_WITH_CODE(false,
786 "VDDCR_SOC is not SVID2!",
787 return -1);
788 }
789
790 /* MVDDC */
791 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
792 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
793 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
794 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
795 &vol_table)) {
796 config_telemetry |=
797 ((vol_table.telemetry_slope << 24) & 0xff000000) |
798 ((vol_table.telemetry_offset << 16) & 0xff0000);
799 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
800 }
801 }
802
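 /*
 * Added layout note (derived from the shifts/masks above): config_telemetry
 * packs the SVI2 telemetry parameters as
 *   byte 0 = VDDCR_SOC offset, byte 1 = VDDCR_SOC slope,
 *   byte 2 = MVDDC offset,     byte 3 = MVDDC slope.
 */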
803 /* VDDCI_MEM */
804 if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
805 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
806 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
807 data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
808 }
809
810 data->config_telemetry = config_telemetry;
811
812 vega10_set_features_platform_caps(hwmgr);
813
814 vega10_init_dpm_defaults(hwmgr);
815
816#ifdef PPLIB_VEGA10_EVV_SUPPORT
817 /* Get leakage voltage based on leakage ID. */
818 PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
819 "Get EVV Voltage Failed. Abort Driver loading!",
820 return -1);
821#endif
822
823 /* Patch our voltage dependency table with actual leakage voltage
824 * We need to perform leakage translation before it's used by other functions
825 */
826 vega10_complete_dependency_tables(hwmgr);
827
828 /* Parse pptable data read from VBIOS */
829 vega10_set_private_data_based_on_pptable(hwmgr);
830
831 data->is_tlu_enabled = false;
832
833 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
834 VEGA10_MAX_HARDWARE_POWERLEVELS;
835 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
836 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
837
838 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
839 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
840 hwmgr->platform_descriptor.clockStep.engineClock = 500;
841 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
842
843 data->total_active_cus = adev->gfx.cu_info.number;
844 /* Setup default Overdrive Fan control settings */
845 data->odn_fan_table.target_fan_speed =
846 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
847 data->odn_fan_table.target_temperature =
848 hwmgr->thermal_controller.
849 advanceFanControlParameters.ucTargetTemperature;
850 data->odn_fan_table.min_performance_clock =
851 hwmgr->thermal_controller.advanceFanControlParameters.
852 ulMinFanSCLKAcousticLimit;
853 data->odn_fan_table.min_fan_limit =
854 hwmgr->thermal_controller.
855 advanceFanControlParameters.usFanPWMMinLimit *
856 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
857
858 data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
859 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
860 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
861 PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
862 "Mem Channel Index Exceeded maximum!",
863 return -EINVAL);
864
865 return result;
866}
867
868static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
869{
690dc626 870 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
871
872 data->low_sclk_interrupt_threshold = 0;
873
874 return 0;
875}
876
877static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
878{
879 struct vega10_hwmgr *data = hwmgr->backend;
880 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
881
882 struct pp_atomfwctrl_voltage_table table;
883 uint8_t i, j;
884 uint32_t mask = 0;
885 uint32_t tmp;
886 int32_t ret = 0;
887
888 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
889 VOLTAGE_OBJ_GPIO_LUT, &table);
890
891 if (!ret) {
892 tmp = table.mask_low;
893 for (i = 0, j = 0; i < 32; i++) {
894 if (tmp & 1) {
895 mask |= (uint32_t)(i << (8 * j));
896 if (++j >= 3)
897 break;
898 }
899 tmp >>= 1;
900 }
901 }
902
903 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
904 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
905 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
906 return 0;
907}
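/*
 * Worked example (added for illustration): the loop above stores the bit
 * positions of the first three set bits of table.mask_low in bytes 0-2 of
 * "mask". For instance, mask_low = 0x00090002 (bits 1, 16 and 19 set) gives
 * LedPin0 = 1, LedPin1 = 16 and LedPin2 = 19.
 */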
908
909static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
910{
911 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
912 "Failed to init sclk threshold!",
913 return -EINVAL);
914
915 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
916 "Failed to set up led dpm config!",
917 return -EINVAL);
918
919 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
920
921 return 0;
922}
923
924/**
925* Remove repeated voltage values and create table with unique values.
926*
927* @param hwmgr the address of the powerplay hardware manager.
928* @param vol_table the pointer to changing voltage table
929* @return 0 on success
930*/
931
932static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
933 struct pp_atomfwctrl_voltage_table *vol_table)
934{
935 uint32_t i, j;
936 uint16_t vvalue;
937 bool found = false;
938 struct pp_atomfwctrl_voltage_table *table;
939
940 PP_ASSERT_WITH_CODE(vol_table,
941 "Voltage Table empty.", return -EINVAL);
942 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
943 GFP_KERNEL);
944
945 if (!table)
946 return -ENOMEM;
947
948 table->mask_low = vol_table->mask_low;
949 table->phase_delay = vol_table->phase_delay;
950
951 for (i = 0; i < vol_table->count; i++) {
952 vvalue = vol_table->entries[i].value;
953 found = false;
954
955 for (j = 0; j < table->count; j++) {
956 if (vvalue == table->entries[j].value) {
957 found = true;
958 break;
959 }
960 }
961
962 if (!found) {
963 table->entries[table->count].value = vvalue;
964 table->entries[table->count].smio_low =
965 vol_table->entries[i].smio_low;
966 table->count++;
967 }
968 }
969
970 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
971 kfree(table);
972
973 return 0;
974}
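/*
 * Worked example (added for illustration): a dependency-derived table with
 * values {900, 900, 950, 1000, 1000} is reduced by the routine above to the
 * three unique entries {900, 950, 1000}; the first occurrence of each value
 * keeps its smio_low setting.
 */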
975
976static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
977 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
978 struct pp_atomfwctrl_voltage_table *vol_table)
979{
980 int i;
981
982 PP_ASSERT_WITH_CODE(dep_table->count,
983 "Voltage Dependency Table empty.",
984 return -EINVAL);
985
986 vol_table->mask_low = 0;
987 vol_table->phase_delay = 0;
988 vol_table->count = dep_table->count;
989
990 for (i = 0; i < vol_table->count; i++) {
991 vol_table->entries[i].value = dep_table->entries[i].mvdd;
992 vol_table->entries[i].smio_low = 0;
993 }
994
995 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
996 vol_table),
997 "Failed to trim MVDD Table!",
998 return -1);
999
1000 return 0;
1001}
1002
1003static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
1004 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1005 struct pp_atomfwctrl_voltage_table *vol_table)
1006{
1007 uint32_t i;
1008
1009 PP_ASSERT_WITH_CODE(dep_table->count,
1010 "Voltage Dependency Table empty.",
1011 return -EINVAL);
1012
1013 vol_table->mask_low = 0;
1014 vol_table->phase_delay = 0;
1015 vol_table->count = dep_table->count;
1016
1017 for (i = 0; i < dep_table->count; i++) {
1018 vol_table->entries[i].value = dep_table->entries[i].vddci;
1019 vol_table->entries[i].smio_low = 0;
1020 }
1021
1022 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1023 "Failed to trim VDDCI table.",
1024 return -1);
1025
1026 return 0;
1027}
1028
1029static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1030 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1031 struct pp_atomfwctrl_voltage_table *vol_table)
1032{
1033 int i;
1034
1035 PP_ASSERT_WITH_CODE(dep_table->count,
1036 "Voltage Dependency Table empty.",
1037 return -EINVAL);
1038
1039 vol_table->mask_low = 0;
1040 vol_table->phase_delay = 0;
1041 vol_table->count = dep_table->count;
1042
1043 for (i = 0; i < vol_table->count; i++) {
1044 vol_table->entries[i].value = dep_table->entries[i].vddc;
1045 vol_table->entries[i].smio_low = 0;
1046 }
1047
1048 return 0;
1049}
1050
1051/* ---- Voltage Tables ----
1052 * If the voltage table would be bigger than
1053 * what will fit into the state table on
1054 * the SMC, keep only the higher entries.
1055 */
1056static void vega10_trim_voltage_table_to_fit_state_table(
1057 struct pp_hwmgr *hwmgr,
1058 uint32_t max_vol_steps,
1059 struct pp_atomfwctrl_voltage_table *vol_table)
1060{
1061 unsigned int i, diff;
1062
1063 if (vol_table->count <= max_vol_steps)
1064 return;
1065
1066 diff = vol_table->count - max_vol_steps;
1067
1068 for (i = 0; i < max_vol_steps; i++)
1069 vol_table->entries[i] = vol_table->entries[i + diff];
1070
1071 vol_table->count = max_vol_steps;
1072}
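/*
 * Worked example (added for illustration): with max_vol_steps = 16 and a
 * 20-entry table, diff = 4, so entries 4..19 are shifted down into slots
 * 0..15 and, assuming the table is sorted ascending, the four lowest-voltage
 * entries are dropped, as the comment above describes.
 */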
1073
1074/**
1075* Create Voltage Tables.
1076*
1077* @param hwmgr the address of the powerplay hardware manager.
1078* @return always 0
1079*/
1080static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1081{
1082 struct vega10_hwmgr *data = hwmgr->backend;
1083 struct phm_ppt_v2_information *table_info =
1084 (struct phm_ppt_v2_information *)hwmgr->pptable;
1085 int result;
1086
1087 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1088 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1089 result = vega10_get_mvdd_voltage_table(hwmgr,
1090 table_info->vdd_dep_on_mclk,
1091 &(data->mvdd_voltage_table));
1092 PP_ASSERT_WITH_CODE(!result,
1093 "Failed to retrieve MVDDC table!",
1094 return result);
1095 }
1096
1097 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1098 result = vega10_get_vddci_voltage_table(hwmgr,
1099 table_info->vdd_dep_on_mclk,
1100 &(data->vddci_voltage_table));
1101 PP_ASSERT_WITH_CODE(!result,
1102 "Failed to retrieve VDDCI_MEM table!",
1103 return result);
1104 }
1105
1106 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1107 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1108 result = vega10_get_vdd_voltage_table(hwmgr,
1109 table_info->vdd_dep_on_sclk,
1110 &(data->vddc_voltage_table));
1111 PP_ASSERT_WITH_CODE(!result,
1112 "Failed to retrieve VDDCR_SOC table!",
1113 return result);
1114 }
1115
1116 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1117 "Too many voltage values for VDDC. Trimming to fit state table.",
1118 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1119 16, &(data->vddc_voltage_table)));
1120
1121 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1122 "Too many voltage values for VDDCI. Trimming to fit state table.",
1123 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1124 16, &(data->vddci_voltage_table)));
1125
1126 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1127 "Too many voltage values for MVDD. Trimming to fit state table.",
1128 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1129 16, &(data->mvdd_voltage_table)));
1130
1131
1132 return 0;
1133}
1134
1135/*
1136 * @fn vega10_init_dpm_state
1137 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1138 *
1139 * @param dpm_state - the address of the DPM Table to initialize.
1140 * @return None.
1141 */
1142static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1143{
1144 dpm_state->soft_min_level = 0xff;
1145 dpm_state->soft_max_level = 0xff;
1146 dpm_state->hard_min_level = 0xff;
1147 dpm_state->hard_max_level = 0xff;
1148}
1149
1150static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1151 struct vega10_single_dpm_table *dpm_table,
1152 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1153{
1154 int i;
1155
1156 dpm_table->count = 0;
1157
1158 for (i = 0; i < dep_table->count; i++) {
1159 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1160 dep_table->entries[i].clk) {
1161 dpm_table->dpm_levels[dpm_table->count].value =
1162 dep_table->entries[i].clk;
1163 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1164 dpm_table->count++;
1165 }
1166 }
1167}
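/*
 * Illustrative example (not from the original source): for dependency clocks
 * {852, 991, 960, 1138} the helper above produces levels {852, 991, 1138};
 * the 960 entry is skipped because it is lower than the last accepted level,
 * so the resulting DPM levels are non-decreasing.
 */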
1168static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1169{
1170 struct vega10_hwmgr *data = hwmgr->backend;
1171 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1172 struct phm_ppt_v2_information *table_info =
1173 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1174 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1175 table_info->pcie_table;
1176 uint32_t i;
1177
1178 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1179 "Incorrect number of PCIE States from VBIOS!",
1180 return -1);
1181
1182 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1183 if (data->registry_data.pcieSpeedOverride)
1184 pcie_table->pcie_gen[i] =
1185 data->registry_data.pcieSpeedOverride;
1186 else
1187 pcie_table->pcie_gen[i] =
1188 bios_pcie_table->entries[i].gen_speed;
1189
1190 if (data->registry_data.pcieLaneOverride)
1191 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1192 data->registry_data.pcieLaneOverride);
1193 else
1194 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1195 bios_pcie_table->entries[i].lane_width);
1196 if (data->registry_data.pcieClockOverride)
1197 pcie_table->lclk[i] =
1198 data->registry_data.pcieClockOverride;
1199 else
1200 pcie_table->lclk[i] =
1201 bios_pcie_table->entries[i].pcie_sclk;
1202 }
1203
1204 pcie_table->count = NUM_LINK_LEVELS;
1205
1206 return 0;
1207}
1208
1209/*
1210 * This function is to initialize all DPM state tables
1211 * for SMU based on the dependency table.
1212 * Dynamic state patching function will then trim these
1213 * state tables to the allowed range based
1214 * on the power policy or external client requests,
1215 * such as UVD request, etc.
1216 */
1217static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1218{
1219 struct vega10_hwmgr *data = hwmgr->backend;
1220 struct phm_ppt_v2_information *table_info =
1221 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1222 struct vega10_single_dpm_table *dpm_table;
1223 uint32_t i;
1224
1225 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1226 table_info->vdd_dep_on_socclk;
1227 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1228 table_info->vdd_dep_on_sclk;
1229 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1230 table_info->vdd_dep_on_mclk;
1231 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1232 table_info->mm_dep_table;
1233 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1234 table_info->vdd_dep_on_dcefclk;
1235 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1236 table_info->vdd_dep_on_pixclk;
1237 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1238 table_info->vdd_dep_on_dispclk;
1239 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1240 table_info->vdd_dep_on_phyclk;
1241
1242 PP_ASSERT_WITH_CODE(dep_soc_table,
1243 "SOCCLK dependency table is missing. This table is mandatory",
1244 return -EINVAL);
1245 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1246 "SOCCLK dependency table is empty. This table is mandatory",
1247 return -EINVAL);
1248
1249 PP_ASSERT_WITH_CODE(dep_gfx_table,
1250 "GFXCLK dependency table is missing. This table is mandatory",
1251 return -EINVAL);
1252 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1253 "GFXCLK dependency table is empty. This table is mandatory",
1254 return -EINVAL);
1255
1256 PP_ASSERT_WITH_CODE(dep_mclk_table,
1257 "MCLK dependency table is missing. This table is mandatory",
1258 return -EINVAL);
1259 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1260 "MCLK dependency table is empty. This table is mandatory",
1261 return -EINVAL);
1262
1263 /* Initialize Sclk DPM table based on allowed Sclk values */
1264 dpm_table = &(data->dpm_table.soc_table);
1265 vega10_setup_default_single_dpm_table(hwmgr,
1266 dpm_table,
1267 dep_soc_table);
1268
1269 vega10_init_dpm_state(&(dpm_table->dpm_state));
1270
1271 dpm_table = &(data->dpm_table.gfx_table);
1272 vega10_setup_default_single_dpm_table(hwmgr,
1273 dpm_table,
1274 dep_gfx_table);
1275 vega10_init_dpm_state(&(dpm_table->dpm_state));
1276
1277 /* Initialize Mclk DPM table based on allowed Mclk values */
1278 data->dpm_table.mem_table.count = 0;
1279 dpm_table = &(data->dpm_table.mem_table);
1280 vega10_setup_default_single_dpm_table(hwmgr,
1281 dpm_table,
1282 dep_mclk_table);
1283 vega10_init_dpm_state(&(dpm_table->dpm_state));
1284
1285 data->dpm_table.eclk_table.count = 0;
1286 dpm_table = &(data->dpm_table.eclk_table);
1287 for (i = 0; i < dep_mm_table->count; i++) {
1288 if (i == 0 || dpm_table->dpm_levels
1289 [dpm_table->count - 1].value <=
1290 dep_mm_table->entries[i].eclk) {
1291 dpm_table->dpm_levels[dpm_table->count].value =
1292 dep_mm_table->entries[i].eclk;
1293 dpm_table->dpm_levels[dpm_table->count].enabled =
1294 (i == 0) ? true : false;
1295 dpm_table->count++;
1296 }
1297 }
1298 vega10_init_dpm_state(&(dpm_table->dpm_state));
1299
1300 data->dpm_table.vclk_table.count = 0;
1301 data->dpm_table.dclk_table.count = 0;
1302 dpm_table = &(data->dpm_table.vclk_table);
1303 for (i = 0; i < dep_mm_table->count; i++) {
1304 if (i == 0 || dpm_table->dpm_levels
1305 [dpm_table->count - 1].value <=
1306 dep_mm_table->entries[i].vclk) {
1307 dpm_table->dpm_levels[dpm_table->count].value =
1308 dep_mm_table->entries[i].vclk;
1309 dpm_table->dpm_levels[dpm_table->count].enabled =
1310 (i == 0) ? true : false;
1311 dpm_table->count++;
1312 }
1313 }
1314 vega10_init_dpm_state(&(dpm_table->dpm_state));
1315
1316 dpm_table = &(data->dpm_table.dclk_table);
1317 for (i = 0; i < dep_mm_table->count; i++) {
1318 if (i == 0 || dpm_table->dpm_levels
1319 [dpm_table->count - 1].value <=
1320 dep_mm_table->entries[i].dclk) {
1321 dpm_table->dpm_levels[dpm_table->count].value =
1322 dep_mm_table->entries[i].dclk;
1323 dpm_table->dpm_levels[dpm_table->count].enabled =
1324 (i == 0) ? true : false;
1325 dpm_table->count++;
1326 }
1327 }
1328 vega10_init_dpm_state(&(dpm_table->dpm_state));
1329
1330 /* Assume there is no headless Vega10 for now */
1331 dpm_table = &(data->dpm_table.dcef_table);
1332 vega10_setup_default_single_dpm_table(hwmgr,
1333 dpm_table,
1334 dep_dcef_table);
1335
1336 vega10_init_dpm_state(&(dpm_table->dpm_state));
1337
1338 dpm_table = &(data->dpm_table.pixel_table);
1339 vega10_setup_default_single_dpm_table(hwmgr,
1340 dpm_table,
1341 dep_pix_table);
1342
1343 vega10_init_dpm_state(&(dpm_table->dpm_state));
1344
1345 dpm_table = &(data->dpm_table.display_table);
1346 vega10_setup_default_single_dpm_table(hwmgr,
1347 dpm_table,
1348 dep_disp_table);
1349
1350 vega10_init_dpm_state(&(dpm_table->dpm_state));
1351
1352 dpm_table = &(data->dpm_table.phy_table);
1353 vega10_setup_default_single_dpm_table(hwmgr,
1354 dpm_table,
1355 dep_phy_table);
1356
1357 vega10_init_dpm_state(&(dpm_table->dpm_state));
1358
1359 vega10_setup_default_pcie_table(hwmgr);
1360
1361 /* save a copy of the default DPM table */
1362 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1363 sizeof(struct vega10_dpm_table));
1364
1365 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
1366 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
1367 data->odn_dpm_table.odn_core_clock_dpm_levels.num_of_pl =
1368 data->dpm_table.gfx_table.count;
1369 for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
1370 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].clock =
1371 data->dpm_table.gfx_table.dpm_levels[i].value;
1372 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].enabled = true;
1373 }
1374
1375 data->odn_dpm_table.vdd_dependency_on_sclk.count =
1376 dep_gfx_table->count;
1377 for (i = 0; i < dep_gfx_table->count; i++) {
1378 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
1379 dep_gfx_table->entries[i].clk;
1380 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
1381 dep_gfx_table->entries[i].vddInd;
1382 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
1383 dep_gfx_table->entries[i].cks_enable;
1384 data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
1385 dep_gfx_table->entries[i].cks_voffset;
1386 }
1387
1388 data->odn_dpm_table.odn_memory_clock_dpm_levels.num_of_pl =
1389 data->dpm_table.mem_table.count;
1390 for (i = 0; i < data->dpm_table.mem_table.count; i++) {
1391 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].clock =
1392 data->dpm_table.mem_table.dpm_levels[i].value;
1393 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].enabled = true;
1394 }
1395
1396 data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
1397 for (i = 0; i < dep_mclk_table->count; i++) {
1398 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
1399 dep_mclk_table->entries[i].clk;
1400 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
1401 dep_mclk_table->entries[i].vddInd;
1402 data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
1403 dep_mclk_table->entries[i].vddci;
1404 }
1405 }
1406
1407 return 0;
1408}
1409
1410/*
1411 * @fn vega10_populate_ulv_state
1412 * @brief Function to provide parameters for Ultra Low Voltage state to SMC.
1413 *
1414 * @param hwmgr - the address of the hardware manager.
1415 * @return Always 0.
1416 */
1417static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1418{
1419 struct vega10_hwmgr *data = hwmgr->backend;
1420 struct phm_ppt_v2_information *table_info =
1421 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1422
1423 data->smc_state_table.pp_table.UlvOffsetVid =
1424 (uint8_t)table_info->us_ulv_voltage_offset;
1425
1426 data->smc_state_table.pp_table.UlvSmnclkDid =
1427 (uint8_t)(table_info->us_ulv_smnclk_did);
1428 data->smc_state_table.pp_table.UlvMp1clkDid =
1429 (uint8_t)(table_info->us_ulv_mp1clk_did);
1430 data->smc_state_table.pp_table.UlvGfxclkBypass =
1431 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1432 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1433 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1434 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1435 (uint8_t)(data->vddc_voltage_table.psi1_enable);
1436
1437 return 0;
1438}
1439
1440static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1441 uint32_t lclock, uint8_t *curr_lclk_did)
1442{
1443 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1444
1445 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1446 hwmgr,
1447 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1448 lclock, &dividers),
1449 "Failed to get LCLK clock settings from VBIOS!",
1450 return -1);
1451
1452 *curr_lclk_did = dividers.ulDid;
1453
1454 return 0;
1455}
1456
1457static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1458{
1459 int result = -1;
1460 struct vega10_hwmgr *data = hwmgr->backend;
1461 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1462 struct vega10_pcie_table *pcie_table =
1463 &(data->dpm_table.pcie_table);
1464 uint32_t i, j;
1465
1466 for (i = 0; i < pcie_table->count; i++) {
1467 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1468 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1469
1470 result = vega10_populate_single_lclk_level(hwmgr,
1471 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1472 if (result) {
1473 pr_info("Populate LClock Level %d Failed!\n", i);
1474 return result;
1475 }
1476 }
1477
1478 j = i - 1;
1479 while (i < NUM_LINK_LEVELS) {
1480 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1481 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1482
1483 result = vega10_populate_single_lclk_level(hwmgr,
1484 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1485 if (result) {
1486 pr_info("Populate LClock Level %d Failed!\n", i);
1487 return result;
1488 }
1489 i++;
1490 }
1491
1492 return result;
1493}
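/*
 * Added note: after the VBIOS-derived PCIe levels are populated, the trailing
 * while-loop above replicates the last valid level into any remaining of the
 * NUM_LINK_LEVELS SMC slots so the whole link-level table is initialized.
 */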
1494
1495/**
1496* Populates single SMC GFXSCLK structure using the provided engine clock
1497*
1498* @param hwmgr the address of the hardware manager
1499* @param gfx_clock the GFX clock to use to populate the structure.
1500* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1501*/
1502
1503static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1504 uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
1505 uint32_t *acg_freq)
1506{
1507 struct phm_ppt_v2_information *table_info =
1508 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1509 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
1510 table_info->vdd_dep_on_sclk;
1511 struct vega10_hwmgr *data = hwmgr->backend;
1512 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1513 uint32_t gfx_max_clock =
1514 hwmgr->platform_descriptor.overdriveLimit.engineClock;
1515 uint32_t i = 0;
1516
1517 if (data->apply_overdrive_next_settings_mask &
1518 DPMTABLE_OD_UPDATE_VDDC)
1519 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1520 &(data->odn_dpm_table.vdd_dependency_on_sclk);
1521
1522 PP_ASSERT_WITH_CODE(dep_on_sclk,
1523 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
1524 return -EINVAL);
1525
1526 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
1527 gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
1528 else {
1529 for (i = 0; i < dep_on_sclk->count; i++) {
1530 if (dep_on_sclk->entries[i].clk == gfx_clock)
1531 break;
1532 }
1533 PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1534 "Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1535 return -EINVAL);
1536 }
1537
1538 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1539 COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1540 gfx_clock, &dividers),
1541 "Failed to get GFX Clock settings from VBIOS!",
1542 return -EINVAL);
1543
1544 /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1545 current_gfxclk_level->FbMult =
1546 cpu_to_le32(dividers.ulPll_fb_mult);
1547 /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
93480f89 1548 current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
1549 current_gfxclk_level->SsFbMult =
1550 cpu_to_le32(dividers.ulPll_ss_fbsmult);
1551 current_gfxclk_level->SsSlewFrac =
1552 cpu_to_le16(dividers.usPll_ss_slew_frac);
1553 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1554
1555 *acg_freq = gfx_clock / 100; /* 100 kHz to MHz conversion */
1556
1557 return 0;
1558}
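/*
 * Worked example of the FbMult packing noted in the comment above
 * (integer part in bits 8:0, post divider in bits 15:12, fraction in
 * bits 31:16).  The value is made up purely for illustration:
 *
 *	fb_mult  = 0x80000064
 *	integer  =  fb_mult        & 0x1FF  = 0x064  -> 100
 *	post_div = (fb_mult >> 12) & 0xF    = 0x0
 *	fraction = (fb_mult >> 16) & 0xFFFF = 0x8000 -> 0.5, if read as a
 *		   16-bit binary fraction
 *
 * i.e. an effective feedback multiplier of roughly 100.5.
 */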
1559
/**
 * @brief Populates a single SMC SOCCLK structure using the provided clock.
 *
 * @param hwmgr - the address of the hardware manager.
 * @param soc_clock - the SOC clock to use to populate the structure.
 * @param current_soc_did - output for the SOCCLK divider ID.
 * @param current_vol_index - output for the voltage index from the dependency table.
 * @return 0 on success.
 */
1568static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1569 uint32_t soc_clock, uint8_t *current_soc_did,
1570 uint8_t *current_vol_index)
1571{
1572 struct phm_ppt_v2_information *table_info =
1573 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1574 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
1575 table_info->vdd_dep_on_socclk;
1576 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1577 uint32_t i;
1578
1579 PP_ASSERT_WITH_CODE(dep_on_soc,
1580 "Invalid SOC_VDD-SOC_CLK Dependency Table!",
1581 return -EINVAL);
1582 for (i = 0; i < dep_on_soc->count; i++) {
1583 if (dep_on_soc->entries[i].clk == soc_clock)
1584 break;
1585 }
1586 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1587 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1588 return -EINVAL);
1589 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1590 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1591 soc_clock, &dividers),
1592 "Failed to get SOC Clock settings from VBIOS!",
1593 return -EINVAL);
1594
1595 *current_soc_did = (uint8_t)dividers.ulDid;
1596 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1597
1598 return 0;
1599}
1600
1601uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
1602 uint32_t clk,
1603 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1604{
1605 uint16_t i;
1606
1607 for (i = 0; i < dep_table->count; i++) {
1608 if (dep_table->entries[i].clk == clk)
1609 return dep_table->entries[i].vddc;
1610 }
1611
1612 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
1613 return 0;
1614}
1615
1616/**
1617* Populates all SMC SCLK level structures based on the trimmed allowed DPM engine clock states
1618*
1619* @param hwmgr the address of the hardware manager
1620*/
1621static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1622{
690dc626 1623 struct vega10_hwmgr *data = hwmgr->backend;
1624 struct phm_ppt_v2_information *table_info =
1625 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1626 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
1627 table_info->vdd_dep_on_socclk;
1628 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1629 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1630 int result = 0;
1631 uint32_t i, j;
1632
1633 for (i = 0; i < dpm_table->count; i++) {
1634 result = vega10_populate_single_gfx_level(hwmgr,
1635 dpm_table->dpm_levels[i].value,
1636 &(pp_table->GfxclkLevel[i]),
1637 &(pp_table->AcgFreqTable[i]));
1638 if (result)
1639 return result;
1640 }
1641
1642 j = i - 1;
1643 while (i < NUM_GFXCLK_DPM_LEVELS) {
1644 result = vega10_populate_single_gfx_level(hwmgr,
1645 dpm_table->dpm_levels[j].value,
1646 &(pp_table->GfxclkLevel[i]),
1647 &(pp_table->AcgFreqTable[i]));
1648 if (result)
1649 return result;
1650 i++;
1651 }
1652
1653 pp_table->GfxclkSlewRate =
1654 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1655
1656 dpm_table = &(data->dpm_table.soc_table);
1657 for (i = 0; i < dpm_table->count; i++) {
1658 pp_table->SocVid[i] =
1659 (uint8_t)convert_to_vid(
1660 vega10_locate_vddc_given_clock(hwmgr,
1661 dpm_table->dpm_levels[i].value,
1662 dep_table));
1663 result = vega10_populate_single_soc_level(hwmgr,
1664 dpm_table->dpm_levels[i].value,
1665 &(pp_table->SocclkDid[i]),
1666 &(pp_table->SocDpmVoltageIndex[i]));
1667 if (result)
1668 return result;
1669 }
1670
1671 j = i - 1;
1672 while (i < NUM_SOCCLK_DPM_LEVELS) {
1673 pp_table->SocVid[i] = pp_table->SocVid[j];
1674 result = vega10_populate_single_soc_level(hwmgr,
1675 dpm_table->dpm_levels[j].value,
1676 &(pp_table->SocclkDid[i]),
1677 &(pp_table->SocDpmVoltageIndex[i]));
1678 if (result)
1679 return result;
1680 i++;
1681 }
1682
1683 return result;
1684}
1685
/**
 * @brief Populates a single SMC UCLK (memory clock) structure using the provided clock.
 *
 * @param hwmgr - the address of the hardware manager.
 * @param mem_clock - the memory clock to use to populate the structure.
 * @param current_mem_vid - output for the MVDD VID of the selected level.
 * @param current_memclk_level - location in PPTable for the SMC UCLK structure.
 * @param current_mem_soc_vind - output for the SOC voltage index of the selected level.
 * @return 0 on success.
 */
1693static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1694 uint32_t mem_clock, uint8_t *current_mem_vid,
1695 PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1696{
690dc626 1697 struct vega10_hwmgr *data = hwmgr->backend;
1698 struct phm_ppt_v2_information *table_info =
1699 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1700 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
1701 table_info->vdd_dep_on_mclk;
1702 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1703 uint32_t mem_max_clock =
1704 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
1705 uint32_t i = 0;
1706
1707 if (data->apply_overdrive_next_settings_mask &
1708 DPMTABLE_OD_UPDATE_VDDC)
1709 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1710 &data->odn_dpm_table.vdd_dependency_on_mclk;
1711
1712 PP_ASSERT_WITH_CODE(dep_on_mclk,
1713 "Invalid SOC_VDD-UCLK Dependency Table!",
1714 return -EINVAL);
1715
1716 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
1717 mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
1718 else {
1719 for (i = 0; i < dep_on_mclk->count; i++) {
1720 if (dep_on_mclk->entries[i].clk == mem_clock)
1721 break;
1722 }
1723 PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1724 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1725 return -EINVAL);
1726 }
1727
1728 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1729 hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
1730 "Failed to get UCLK settings from VBIOS!",
1731 return -1);
1732
1733 *current_mem_vid =
1734 (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1735 *current_mem_soc_vind =
1736 (uint8_t)(dep_on_mclk->entries[i].vddInd);
1737 current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1738 current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1739
1740 PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1741 "Invalid Divider ID!",
1742 return -EINVAL);
1743
1744 return 0;
1745}
1746
1747/**
1748 * @brief Populates all SMC MCLK level structures based on the trimmed allowed DPM memory clock states.
1749 *
1750 * @param hwmgr - the address of the hardware manager.
1751 * @return 0 on success.
1752 */
1753static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1754{
690dc626 1755 struct vega10_hwmgr *data = hwmgr->backend;
1756 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1757 struct vega10_single_dpm_table *dpm_table =
1758 &(data->dpm_table.mem_table);
1759 int result = 0;
451cc55d 1760 uint32_t i, j;
1761
1762 for (i = 0; i < dpm_table->count; i++) {
1763 result = vega10_populate_single_memory_level(hwmgr,
1764 dpm_table->dpm_levels[i].value,
1765 &(pp_table->MemVid[i]),
1766 &(pp_table->UclkLevel[i]),
1767 &(pp_table->MemSocVoltageIndex[i]));
1768 if (result)
1769 return result;
1770 }
1771
1772 j = i - 1;
1773 while (i < NUM_UCLK_DPM_LEVELS) {
1774 result = vega10_populate_single_memory_level(hwmgr,
1775 dpm_table->dpm_levels[j].value,
1776 &(pp_table->MemVid[i]),
1777 &(pp_table->UclkLevel[i]),
1778 &(pp_table->MemSocVoltageIndex[i]));
1779 if (result)
1780 return result;
1781 i++;
1782 }
1783
451cc55d 1784 pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
f83a9991 1785 pp_table->MemoryChannelWidth =
1786 (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
1787 channel_number[data->mem_channels]);
1788
1789 pp_table->LowestUclkReservedForUlv =
1790 (uint8_t)(data->lowest_uclk_reserved_for_ulv);
1791
1792 return result;
1793}
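/*
 * Illustration: the MemoryChannelWidth written above is simply the
 * per-channel width multiplied by the decoded channel count, so e.g. a
 * 128-bit HBM channel with 8 active channels reports a 1024-bit
 * interface (numbers shown only as an example of the arithmetic).
 */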
1794
1795static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1796 DSPCLK_e disp_clock)
1797{
690dc626 1798 struct vega10_hwmgr *data = hwmgr->backend;
1799 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1800 struct phm_ppt_v2_information *table_info =
1801 (struct phm_ppt_v2_information *)
1802 (hwmgr->pptable);
1803 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1804 uint32_t i;
1805 uint16_t clk = 0, vddc = 0;
1806 uint8_t vid = 0;
1807
1808 switch (disp_clock) {
1809 case DSPCLK_DCEFCLK:
1810 dep_table = table_info->vdd_dep_on_dcefclk;
1811 break;
1812 case DSPCLK_DISPCLK:
1813 dep_table = table_info->vdd_dep_on_dispclk;
1814 break;
1815 case DSPCLK_PIXCLK:
1816 dep_table = table_info->vdd_dep_on_pixclk;
1817 break;
1818 case DSPCLK_PHYCLK:
1819 dep_table = table_info->vdd_dep_on_phyclk;
1820 break;
1821 default:
1822 return -1;
1823 }
1824
1825 PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1826 "Number Of Entries Exceeded maximum!",
1827 return -1);
1828
1829 for (i = 0; i < dep_table->count; i++) {
1830 clk = (uint16_t)(dep_table->entries[i].clk / 100);
1831 vddc = table_info->vddc_lookup_table->
1832 entries[dep_table->entries[i].vddInd].us_vdd;
1833 vid = (uint8_t)convert_to_vid(vddc);
1834 pp_table->DisplayClockTable[disp_clock][i].Freq =
1835 cpu_to_le16(clk);
1836 pp_table->DisplayClockTable[disp_clock][i].Vid =
1837 cpu_to_le16(vid);
1838 }
1839
1840 while (i < NUM_DSPCLK_LEVELS) {
1841 pp_table->DisplayClockTable[disp_clock][i].Freq =
1842 cpu_to_le16(clk);
1843 pp_table->DisplayClockTable[disp_clock][i].Vid =
1844 cpu_to_le16(vid);
1845 i++;
1846 }
1847
1848 return 0;
1849}
1850
1851static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1852{
1853 uint32_t i;
1854
1855 for (i = 0; i < DSPCLK_COUNT; i++) {
1856 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1857 "Failed to populate Clock in DisplayClockTable!",
1858 return -1);
1859 }
1860
1861 return 0;
1862}
1863
1864static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1865 uint32_t eclock, uint8_t *current_eclk_did,
1866 uint8_t *current_soc_vol)
1867{
1868 struct phm_ppt_v2_information *table_info =
1869 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1870 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1871 table_info->mm_dep_table;
1872 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1873 uint32_t i;
1874
1875 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1876 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1877 eclock, &dividers),
1878 "Failed to get ECLK clock settings from VBIOS!",
1879 return -1);
1880
1881 *current_eclk_did = (uint8_t)dividers.ulDid;
1882
1883 for (i = 0; i < dep_table->count; i++) {
1884 if (dep_table->entries[i].eclk == eclock)
1885 *current_soc_vol = dep_table->entries[i].vddcInd;
1886 }
1887
1888 return 0;
1889}
1890
1891static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1892{
690dc626 1893 struct vega10_hwmgr *data = hwmgr->backend;
1894 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1895 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1896 int result = -EINVAL;
1897 uint32_t i, j;
1898
1899 for (i = 0; i < dpm_table->count; i++) {
1900 result = vega10_populate_single_eclock_level(hwmgr,
1901 dpm_table->dpm_levels[i].value,
1902 &(pp_table->EclkDid[i]),
1903 &(pp_table->VceDpmVoltageIndex[i]));
1904 if (result)
1905 return result;
1906 }
1907
1908 j = i - 1;
1909 while (i < NUM_VCE_DPM_LEVELS) {
1910 result = vega10_populate_single_eclock_level(hwmgr,
1911 dpm_table->dpm_levels[j].value,
1912 &(pp_table->EclkDid[i]),
1913 &(pp_table->VceDpmVoltageIndex[i]));
1914 if (result)
1915 return result;
1916 i++;
1917 }
1918
1919 return result;
1920}
1921
1922static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1923 uint32_t vclock, uint8_t *current_vclk_did)
1924{
1925 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1926
1927 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1928 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1929 vclock, &dividers),
1930 "Failed to get VCLK clock settings from VBIOS!",
1931 return -EINVAL);
1932
1933 *current_vclk_did = (uint8_t)dividers.ulDid;
1934
1935 return 0;
1936}
1937
1938static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1939 uint32_t dclock, uint8_t *current_dclk_did)
1940{
1941 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1942
1943 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1944 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1945 dclock, &dividers),
1946 "Failed to get DCLK clock settings from VBIOS!",
1947 return -EINVAL);
1948
1949 *current_dclk_did = (uint8_t)dividers.ulDid;
1950
1951 return 0;
1952}
1953
1954static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
1955{
690dc626 1956 struct vega10_hwmgr *data = hwmgr->backend;
1957 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1958 struct vega10_single_dpm_table *vclk_dpm_table =
1959 &(data->dpm_table.vclk_table);
1960 struct vega10_single_dpm_table *dclk_dpm_table =
1961 &(data->dpm_table.dclk_table);
1962 struct phm_ppt_v2_information *table_info =
1963 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1964 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1965 table_info->mm_dep_table;
1966 int result = -EINVAL;
1967 uint32_t i, j;
1968
1969 for (i = 0; i < vclk_dpm_table->count; i++) {
1970 result = vega10_populate_single_vclock_level(hwmgr,
1971 vclk_dpm_table->dpm_levels[i].value,
1972 &(pp_table->VclkDid[i]));
1973 if (result)
1974 return result;
1975 }
1976
1977 j = i - 1;
1978 while (i < NUM_UVD_DPM_LEVELS) {
1979 result = vega10_populate_single_vclock_level(hwmgr,
1980 vclk_dpm_table->dpm_levels[j].value,
1981 &(pp_table->VclkDid[i]));
1982 if (result)
1983 return result;
1984 i++;
1985 }
1986
1987 for (i = 0; i < dclk_dpm_table->count; i++) {
1988 result = vega10_populate_single_dclock_level(hwmgr,
1989 dclk_dpm_table->dpm_levels[i].value,
1990 &(pp_table->DclkDid[i]));
1991 if (result)
1992 return result;
1993 }
1994
1995 j = i - 1;
1996 while (i < NUM_UVD_DPM_LEVELS) {
1997 result = vega10_populate_single_dclock_level(hwmgr,
1998 dclk_dpm_table->dpm_levels[j].value,
1999 &(pp_table->DclkDid[i]));
2000 if (result)
2001 return result;
2002 i++;
2003 }
2004
2005 for (i = 0; i < dep_table->count; i++) {
2006 if (dep_table->entries[i].vclk ==
2007 vclk_dpm_table->dpm_levels[i].value &&
2008 dep_table->entries[i].dclk ==
2009 dclk_dpm_table->dpm_levels[i].value)
2010 pp_table->UvdDpmVoltageIndex[i] =
2011 dep_table->entries[i].vddcInd;
2012 else
2013 return -1;
2014 }
2015
2016 j = i - 1;
2017 while (i < NUM_UVD_DPM_LEVELS) {
2018 pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2019 i++;
2020 }
2021
2022 return 0;
2023}
2024
2025static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2026{
690dc626 2027 struct vega10_hwmgr *data = hwmgr->backend;
2028 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2029 struct phm_ppt_v2_information *table_info =
2030 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2031 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2032 table_info->vdd_dep_on_sclk;
2033 uint32_t i;
2034
afc0255c 2035 for (i = 0; i < dep_table->count; i++) {
f83a9991 2036 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2037 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2038 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2039 }
2040
2041 return 0;
2042}
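/*
 * Illustration of the CksVidOffset conversion above: the voltage offset
 * from the dependency table is rescaled into VID steps as
 *
 *	vid_offset = cks_voffset * VOLTAGE_VID_OFFSET_SCALE2
 *			/ VOLTAGE_VID_OFFSET_SCALE1
 *
 * so, with purely illustrative values, an offset of 25 and a scale pair
 * of 100/625 becomes 4 VID steps.
 */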
2043
2044static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2045{
690dc626 2046 struct vega10_hwmgr *data = hwmgr->backend;
2047 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2048 struct phm_ppt_v2_information *table_info =
2049 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2050 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2051 table_info->vdd_dep_on_sclk;
2052 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2053 int result = 0;
2054 uint32_t i;
2055
2056 pp_table->MinVoltageVid = (uint8_t)0xff;
2057 pp_table->MaxVoltageVid = (uint8_t)0;
2058
2059 if (data->smu_features[GNLD_AVFS].supported) {
2060 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2061 if (!result) {
2062 pp_table->MinVoltageVid = (uint8_t)
f83a9991 2063 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
2064 pp_table->MaxVoltageVid = (uint8_t)
2065 convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2066
2067 pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2068 pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2069 pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2070 pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2071 pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2072 pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2073 pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
2074
2075 pp_table->BtcGbVdroopTableCksOff.a0 =
2076 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
6524e494 2077 pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
2078 pp_table->BtcGbVdroopTableCksOff.a1 =
2079 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
6524e494 2080 pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
2081 pp_table->BtcGbVdroopTableCksOff.a2 =
2082 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
2083 pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2084
2085 pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2086 pp_table->BtcGbVdroopTableCksOn.a0 =
2087 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2088 pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2089 pp_table->BtcGbVdroopTableCksOn.a1 =
2090 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2091 pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2092 pp_table->BtcGbVdroopTableCksOn.a2 =
2093 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2094 pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
2095
2096 pp_table->AvfsGbCksOn.m1 =
2097 cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2098 pp_table->AvfsGbCksOn.m2 =
040cd2d1 2099 cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
2100 pp_table->AvfsGbCksOn.b =
2101 cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2102 pp_table->AvfsGbCksOn.m1_shift = 24;
2103 pp_table->AvfsGbCksOn.m2_shift = 12;
6524e494 2104 pp_table->AvfsGbCksOn.b_shift = 0;
f83a9991 2105
2106 pp_table->OverrideAvfsGbCksOn =
2107 avfs_params.ucEnableGbFuseTableCkson;
2108 pp_table->AvfsGbCksOff.m1 =
2109 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2110 pp_table->AvfsGbCksOff.m2 =
040cd2d1 2111 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
2112 pp_table->AvfsGbCksOff.b =
2113 cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2114 pp_table->AvfsGbCksOff.m1_shift = 24;
2115 pp_table->AvfsGbCksOff.m2_shift = 12;
2116 pp_table->AvfsGbCksOff.b_shift = 0;
2117
2118 for (i = 0; i < dep_table->count; i++)
2119 pp_table->StaticVoltageOffsetVid[i] =
2120 convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
2121
2122 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2123 data->disp_clk_quad_eqn_a) &&
2124 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2125 data->disp_clk_quad_eqn_b)) {
2126 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2127 (int32_t)data->disp_clk_quad_eqn_a;
2128 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
6524e494 2129 (int32_t)data->disp_clk_quad_eqn_b;
2130 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2131 (int32_t)data->disp_clk_quad_eqn_c;
2132 } else {
2133 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2134 (int32_t)avfs_params.ulDispclk2GfxclkM1;
2135 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
6524e494 2136 (int32_t)avfs_params.ulDispclk2GfxclkM2;
2137 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2138 (int32_t)avfs_params.ulDispclk2GfxclkB;
2139 }
2140
2141 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2142 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
4bae05e1 2143 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
2144
2145 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2146 data->dcef_clk_quad_eqn_a) &&
2147 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2148 data->dcef_clk_quad_eqn_b)) {
2149 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2150 (int32_t)data->dcef_clk_quad_eqn_a;
2151 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
6524e494 2152 (int32_t)data->dcef_clk_quad_eqn_b;
2153 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2154 (int32_t)data->dcef_clk_quad_eqn_c;
2155 } else {
2156 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2157 (int32_t)avfs_params.ulDcefclk2GfxclkM1;
2158 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
6524e494 2159 (int32_t)avfs_params.ulDcefclk2GfxclkM2;
2160 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2161 (int32_t)avfs_params.ulDcefclk2GfxclkB;
2162 }
2163
2164 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2165 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
4bae05e1 2166 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
2167
2168 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2169 data->pixel_clk_quad_eqn_a) &&
2170 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2171 data->pixel_clk_quad_eqn_b)) {
2172 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2173 (int32_t)data->pixel_clk_quad_eqn_a;
2174 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
6524e494 2175 (int32_t)data->pixel_clk_quad_eqn_b;
2176 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2177 (int32_t)data->pixel_clk_quad_eqn_c;
2178 } else {
2179 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2180 (int32_t)avfs_params.ulPixelclk2GfxclkM1;
2181 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
6524e494 2182 (int32_t)avfs_params.ulPixelclk2GfxclkM2;
2183 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2184 (int32_t)avfs_params.ulPixelclk2GfxclkB;
2185 }
2186
2187 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2188 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
4bae05e1 2189 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
2190 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2191 data->phy_clk_quad_eqn_a) &&
2192 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2193 data->phy_clk_quad_eqn_b)) {
2194 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2195 (int32_t)data->phy_clk_quad_eqn_a;
2196 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
6524e494 2197 (int32_t)data->phy_clk_quad_eqn_b;
2198 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2199 (int32_t)data->phy_clk_quad_eqn_c;
2200 } else {
2201 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2202 (int32_t)avfs_params.ulPhyclk2GfxclkM1;
2203 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
6524e494 2204 (int32_t)avfs_params.ulPhyclk2GfxclkM2;
2205 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2206 (int32_t)avfs_params.ulPhyclk2GfxclkB;
2207 }
2208
2209 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2210 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
4bae05e1 2211 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
2212
2213 pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0;
2214 pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
2215 pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1;
2216 pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
2217 pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2;
2218 pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
2219
2220 pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
2221 pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
2222 pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
2223 pp_table->AcgAvfsGb.m1_shift = 0;
2224 pp_table->AcgAvfsGb.m2_shift = 0;
2225 pp_table->AcgAvfsGb.b_shift = 0;
2226
2227 } else {
2228 data->smu_features[GNLD_AVFS].supported = false;
2229 }
2230 }
2231
2232 return 0;
2233}
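/*
 * Illustration: each DisplayClock2Gfxclk entry above carries quadratic
 * coefficients for the SMC, with m1/m2/b stored as integers and
 * m1_shift/m2_shift/b_shift giving the assumed number of fractional
 * bits.  Under that convention the firmware evaluates roughly
 *
 *	gfxclk_floor(x) = (m1 / 2^m1_shift) * x^2
 *			+ (m2 / 2^m2_shift) * x
 *			+ (b  / 2^b_shift)
 *
 * where x is the display-related clock.  The driver only selects between
 * the VBIOS-fused coefficients and the registry overrides; the actual
 * evaluation happens in firmware.
 */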
2234
2235static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
2236{
690dc626 2237 struct vega10_hwmgr *data = hwmgr->backend;
2238 uint32_t agc_btc_response;
2239
2240 if (data->smu_features[GNLD_ACG].supported) {
d3f8c0ab 2241 if (0 == vega10_enable_smc_features(hwmgr, true,
2242 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
2243 data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
2244
d3f8c0ab 2245 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
bdb8cd10 2246
d3f8c0ab 2247 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
3f9ca14a 2248 agc_btc_response = smum_get_argument(hwmgr);
2249
2250 if (1 == agc_btc_response) {
2251 if (1 == data->acg_loop_state)
d3f8c0ab 2252 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
bdb8cd10 2253 else if (2 == data->acg_loop_state)
2254 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
2255 if (0 == vega10_enable_smc_features(hwmgr, true,
2256 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2257 data->smu_features[GNLD_ACG].enabled = true;
2258 } else {
2259 pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
2260 data->smu_features[GNLD_ACG].enabled = false;
2261 }
2262 }
2263
2264 return 0;
2265}
2266
2267static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
2268{
690dc626 2269 struct vega10_hwmgr *data = hwmgr->backend;
bdb8cd10 2270
2271 if (data->smu_features[GNLD_ACG].supported &&
2272 data->smu_features[GNLD_ACG].enabled)
d3f8c0ab 2273 if (!vega10_enable_smc_features(hwmgr, false,
06474d56 2274 data->smu_features[GNLD_ACG].smu_feature_bitmap))
bdb8cd10 2275 data->smu_features[GNLD_ACG].enabled = false;
2276
2277 return 0;
2278}
2279
2280static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2281{
690dc626 2282 struct vega10_hwmgr *data = hwmgr->backend;
2283 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2284 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2285 int result;
2286
2287 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2288 if (!result) {
2289 if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
2290 data->registry_data.regulator_hot_gpio_support) {
2291 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2292 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2293 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2294 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2295 } else {
2296 pp_table->VR0HotGpio = 0;
2297 pp_table->VR0HotPolarity = 0;
2298 pp_table->VR1HotGpio = 0;
2299 pp_table->VR1HotPolarity = 0;
2300 }
2301
2302 if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
2303 data->registry_data.ac_dc_switch_gpio_support) {
2304 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2305 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2306 } else {
2307 pp_table->AcDcGpio = 0;
2308 pp_table->AcDcPolarity = 0;
2309 }
2310 }
2311
2312 return result;
2313}
2314
2315static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2316{
690dc626 2317 struct vega10_hwmgr *data = hwmgr->backend;
2318
2319 if (data->smu_features[GNLD_AVFS].supported) {
2320 if (enable) {
d3f8c0ab 2321 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2322 true,
2323 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2324 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2325 return -1);
2326 data->smu_features[GNLD_AVFS].enabled = true;
2327 } else {
d3f8c0ab 2328 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991 2329 false,
de196036 2330 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2331 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2332 return -1);
2333 data->smu_features[GNLD_AVFS].enabled = false;
2334 }
2335 }
2336
2337 return 0;
2338}
2339
2340static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2341{
2342 int result = 0;
2343
2344 uint64_t serial_number = 0;
2345 uint32_t top32, bottom32;
2346 struct phm_fuses_default fuse;
2347
690dc626 2348 struct vega10_hwmgr *data = hwmgr->backend;
2349 AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
2350
d3f8c0ab 2351 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
3f9ca14a 2352 top32 = smum_get_argument(hwmgr);
ab5cf3a5 2353
d3f8c0ab 2354 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
3f9ca14a 2355 bottom32 = smum_get_argument(hwmgr);
2356
2357 serial_number = ((uint64_t)bottom32 << 32) | top32;
2358
819c4b94 2359 if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
2360 avfs_fuse_table->VFT0_b = fuse.VFT0_b;
2361 avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
2362 avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
2363 avfs_fuse_table->VFT1_b = fuse.VFT1_b;
2364 avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
2365 avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
2366 avfs_fuse_table->VFT2_b = fuse.VFT2_b;
2367 avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
2368 avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
2369 result = smum_smc_table_manager(hwmgr, (uint8_t *)avfs_fuse_table,
2370 AVFSFUSETABLE, false);
2371 PP_ASSERT_WITH_CODE(!result,
2372 "Failed to upload FuseOVerride!",
2373 );
2374 }
2375
2376 return result;
2377}
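/*
 * Worked example of the serial-number packing above, with hypothetical
 * values:
 *
 *	top32    = 0x11223344
 *	bottom32 = 0xAABBCCDD
 *	serial_number = ((uint64_t)bottom32 << 32) | top32
 *		      = 0xAABBCCDD11223344
 *
 * That packed value is the key used to look up per-part AVFS fuse
 * overrides.
 */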
2378
2379/**
2380* Initializes the SMC table and uploads it to the SMC.
2381*
2382* @param hwmgr the address of the powerplay hardware manager.
2383* @return 0 on success, otherwise an error code returned by the
2384* failing step.
2385*/
2386static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2387{
2388 int result;
690dc626 2389 struct vega10_hwmgr *data = hwmgr->backend;
2390 struct phm_ppt_v2_information *table_info =
2391 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2392 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2393 struct pp_atomfwctrl_voltage_table voltage_table;
05ee3215 2394 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
2395
2396 result = vega10_setup_default_dpm_tables(hwmgr);
2397 PP_ASSERT_WITH_CODE(!result,
2398 "Failed to setup default DPM tables!",
2399 return result);
2400
2401 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2402 VOLTAGE_OBJ_SVID2, &voltage_table);
2403 pp_table->MaxVidStep = voltage_table.max_vid_step;
2404
2405 pp_table->GfxDpmVoltageMode =
2406 (uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2407 pp_table->SocDpmVoltageMode =
2408 (uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2409 pp_table->UclkDpmVoltageMode =
2410 (uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2411 pp_table->UvdDpmVoltageMode =
2412 (uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2413 pp_table->VceDpmVoltageMode =
2414 (uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2415 pp_table->Mp0DpmVoltageMode =
2416 (uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
effa290c 2417
2418 pp_table->DisplayDpmVoltageMode =
2419 (uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2420
2421 data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
2422 data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
2423
2424 if (data->registry_data.ulv_support &&
2425 table_info->us_ulv_voltage_offset) {
2426 result = vega10_populate_ulv_state(hwmgr);
2427 PP_ASSERT_WITH_CODE(!result,
2428 "Failed to initialize ULV state!",
2429 return result);
2430 }
2431
2432 result = vega10_populate_smc_link_levels(hwmgr);
2433 PP_ASSERT_WITH_CODE(!result,
2434 "Failed to initialize Link Level!",
2435 return result);
2436
2437 result = vega10_populate_all_graphic_levels(hwmgr);
2438 PP_ASSERT_WITH_CODE(!result,
2439 "Failed to initialize Graphics Level!",
2440 return result);
2441
2442 result = vega10_populate_all_memory_levels(hwmgr);
2443 PP_ASSERT_WITH_CODE(!result,
2444 "Failed to initialize Memory Level!",
2445 return result);
2446
2447 result = vega10_populate_all_display_clock_levels(hwmgr);
2448 PP_ASSERT_WITH_CODE(!result,
2449 "Failed to initialize Display Level!",
2450 return result);
2451
2452 result = vega10_populate_smc_vce_levels(hwmgr);
2453 PP_ASSERT_WITH_CODE(!result,
2454 "Failed to initialize VCE Level!",
2455 return result);
2456
2457 result = vega10_populate_smc_uvd_levels(hwmgr);
2458 PP_ASSERT_WITH_CODE(!result,
2459 "Failed to initialize UVD Level!",
2460 return result);
2461
afc0255c 2462 if (data->registry_data.clock_stretcher_support) {
2463 result = vega10_populate_clock_stretcher_table(hwmgr);
2464 PP_ASSERT_WITH_CODE(!result,
2465 "Failed to populate Clock Stretcher Table!",
2466 return result);
2467 }
2468
2469 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2470 if (!result) {
2471 data->vbios_boot_state.vddc = boot_up_values.usVddc;
2472 data->vbios_boot_state.vddci = boot_up_values.usVddci;
2473 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2474 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2475 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2476 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2477 SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
2478
2479 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2480 SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
2481
05ee3215 2482 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
c5b053d2 2483 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
05ee3215 2484 if (0 != boot_up_values.usVddc) {
d3f8c0ab 2485 smum_send_msg_to_smc_with_parameter(hwmgr,
2486 PPSMC_MSG_SetFloorSocVoltage,
2487 (boot_up_values.usVddc * 4));
2488 data->vbios_boot_state.bsoc_vddc_lock = true;
2489 } else {
2490 data->vbios_boot_state.bsoc_vddc_lock = false;
2491 }
d3f8c0ab 2492 smum_send_msg_to_smc_with_parameter(hwmgr,
2493 PPSMC_MSG_SetMinDeepSleepDcefclk,
2494 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
2495 }
2496
2497 result = vega10_populate_avfs_parameters(hwmgr);
2498 PP_ASSERT_WITH_CODE(!result,
2499 "Failed to initialize AVFS Parameters!",
2500 return result);
2501
2502 result = vega10_populate_gpio_parameters(hwmgr);
2503 PP_ASSERT_WITH_CODE(!result,
2504 "Failed to initialize GPIO Parameters!",
2505 return result);
2506
2507 pp_table->GfxclkAverageAlpha = (uint8_t)
2508 (data->gfxclk_average_alpha);
2509 pp_table->SocclkAverageAlpha = (uint8_t)
2510 (data->socclk_average_alpha);
2511 pp_table->UclkAverageAlpha = (uint8_t)
2512 (data->uclk_average_alpha);
2513 pp_table->GfxActivityAverageAlpha = (uint8_t)
2514 (data->gfx_activity_average_alpha);
2515
2516 vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2517
2518 result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
2519
2520 PP_ASSERT_WITH_CODE(!result,
2521 "Failed to upload PPtable!", return result);
2522
2523 result = vega10_avfs_enable(hwmgr, true);
2524 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
f83a9991 2525 return result);
bdb8cd10 2526 vega10_acg_enable(hwmgr);
d6c025d2 2527
2528 return 0;
2529}
2530
2531static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2532{
690dc626 2533 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2534
2535 if (data->smu_features[GNLD_THERMAL].supported) {
2536 if (data->smu_features[GNLD_THERMAL].enabled)
2537 pr_info("THERMAL Feature Already enabled!");
2538
2539 PP_ASSERT_WITH_CODE(
d3f8c0ab 2540 !vega10_enable_smc_features(hwmgr,
2541 true,
2542 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2543 "Enable THERMAL Feature Failed!",
2544 return -1);
2545 data->smu_features[GNLD_THERMAL].enabled = true;
2546 }
2547
2548 return 0;
2549}
2550
2551static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2552{
690dc626 2553 struct vega10_hwmgr *data = hwmgr->backend;
2554
2555 if (data->smu_features[GNLD_THERMAL].supported) {
2556 if (!data->smu_features[GNLD_THERMAL].enabled)
2557 pr_info("THERMAL Feature Already disabled!");
2558
2559 PP_ASSERT_WITH_CODE(
d3f8c0ab 2560 !vega10_enable_smc_features(hwmgr,
2561 false,
2562 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2563 "disable THERMAL Feature Failed!",
2564 return -1);
2565 data->smu_features[GNLD_THERMAL].enabled = false;
2566 }
2567
2568 return 0;
2569}
2570
2571static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2572{
690dc626 2573 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 2574
dd5a6fe2 2575 if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
2576 if (data->smu_features[GNLD_VR0HOT].supported) {
2577 PP_ASSERT_WITH_CODE(
d3f8c0ab 2578 !vega10_enable_smc_features(hwmgr,
2579 true,
2580 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2581 "Attempt to Enable VR0 Hot feature Failed!",
2582 return -1);
2583 data->smu_features[GNLD_VR0HOT].enabled = true;
2584 } else {
2585 if (data->smu_features[GNLD_VR1HOT].supported) {
2586 PP_ASSERT_WITH_CODE(
d3f8c0ab 2587 !vega10_enable_smc_features(hwmgr,
2588 true,
2589 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2590 "Attempt to Enable VR0 Hot feature Failed!",
2591 return -1);
2592 data->smu_features[GNLD_VR1HOT].enabled = true;
2593 }
2594 }
2595 }
2596 return 0;
2597}
2598
2599static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2600{
690dc626 2601 struct vega10_hwmgr *data = hwmgr->backend;
2602
2603 if (data->registry_data.ulv_support) {
d3f8c0ab 2604 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2605 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2606 "Enable ULV Feature Failed!",
2607 return -1);
2608 data->smu_features[GNLD_ULV].enabled = true;
2609 }
2610
2611 return 0;
2612}
2613
2614static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2615{
690dc626 2616 struct vega10_hwmgr *data = hwmgr->backend;
2617
2618 if (data->registry_data.ulv_support) {
d3f8c0ab 2619 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2620 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2621 "disable ULV Feature Failed!",
2622 return -EINVAL);
2623 data->smu_features[GNLD_ULV].enabled = false;
2624 }
2625
2626 return 0;
2627}
2628
f83a9991
EH
2629static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2630{
690dc626 2631 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
2632
2633 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
d3f8c0ab 2634 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2635 true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2636 "Attempt to Enable DS_GFXCLK Feature Failed!",
df057e02 2637 return -EINVAL);
2638 data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2639 }
2640
2641 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
d3f8c0ab 2642 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991 2643 true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2644 "Attempt to Enable DS_SOCCLK Feature Failed!",
2645 return -EINVAL);
2646 data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2647 }
2648
2649 if (data->smu_features[GNLD_DS_LCLK].supported) {
d3f8c0ab 2650 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991 2651 true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2652 "Attempt to Enable DS_LCLK Feature Failed!",
2653 return -EINVAL);
2654 data->smu_features[GNLD_DS_LCLK].enabled = true;
2655 }
2656
df057e02 2657 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
d3f8c0ab 2658 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2659 true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2660 "Attempt to Enable DS_DCEFCLK Feature Failed!",
2661 return -EINVAL);
2662 data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
2663 }
2664
2665 return 0;
2666}
2667
2668static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2669{
690dc626 2670 struct vega10_hwmgr *data = hwmgr->backend;
2671
2672 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
d3f8c0ab 2673 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2674 false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2675 "Attempt to disable DS_GFXCLK Feature Failed!",
2676 return -EINVAL);
2677 data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2678 }
2679
2680 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
d3f8c0ab 2681 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2682 false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2683 "Attempt to disable DS_ Feature Failed!",
2684 return -EINVAL);
2685 data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2686 }
2687
2688 if (data->smu_features[GNLD_DS_LCLK].supported) {
d3f8c0ab 2689 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2690 false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2691 "Attempt to disable DS_LCLK Feature Failed!",
2692 return -EINVAL);
2693 data->smu_features[GNLD_DS_LCLK].enabled = false;
2694 }
2695
2696 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
d3f8c0ab 2697 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2698 false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2699 "Attempt to disable DS_DCEFCLK Feature Failed!",
2700 return -EINVAL);
2701 data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
2702 }
2703
2704 return 0;
2705}
2706
2707static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2708{
690dc626 2709 struct vega10_hwmgr *data = hwmgr->backend;
2710 uint32_t i, feature_mask = 0;
2711
2712
2713 if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
d3f8c0ab 2714 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2715 false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2716 "Attempt to disable LED DPM feature failed!", return -EINVAL);
2717 data->smu_features[GNLD_LED_DISPLAY].enabled = false;
2718 }
2719
2720 for (i = 0; i < GNLD_DPM_MAX; i++) {
2721 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2722 if (data->smu_features[i].supported) {
2723 if (data->smu_features[i].enabled) {
2724 feature_mask |= data->smu_features[i].
2725 smu_feature_bitmap;
2726 data->smu_features[i].enabled = false;
2727 }
2728 }
2729 }
2730 }
2731
d3f8c0ab 2732 vega10_enable_smc_features(hwmgr, false, feature_mask);
2733
2734 return 0;
2735}
2736
2737/**
2738 * @brief Tell the SMC to enable the supported DPMs.
2739 *
2740 * @param hwmgr - the address of the powerplay hardware manager.
2741 * @param bitmap - bitmap of the features to enable.
2742 * @return 0 if at least one DPM is successfully enabled.
2743 */
2744static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2745{
690dc626 2746 struct vega10_hwmgr *data = hwmgr->backend;
2747 uint32_t i, feature_mask = 0;
2748
2749 for (i = 0; i < GNLD_DPM_MAX; i++) {
2750 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2751 if (data->smu_features[i].supported) {
2752 if (!data->smu_features[i].enabled) {
2753 feature_mask |= data->smu_features[i].
2754 smu_feature_bitmap;
2755 data->smu_features[i].enabled = true;
2756 }
2757 }
2758 }
2759 }
2760
d3f8c0ab 2761 if (vega10_enable_smc_features(hwmgr,
2762 true, feature_mask)) {
2763 for (i = 0; i < GNLD_DPM_MAX; i++) {
2764 if (data->smu_features[i].smu_feature_bitmap &
2765 feature_mask)
2766 data->smu_features[i].enabled = false;
2767 }
2768 }
2769
2770 if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
d3f8c0ab 2771 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2772 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2773 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2774 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2775 }
2776
05ee3215 2777 if (data->vbios_boot_state.bsoc_vddc_lock) {
d3f8c0ab 2778 smum_send_msg_to_smc_with_parameter(hwmgr,
2779 PPSMC_MSG_SetFloorSocVoltage, 0);
2780 data->vbios_boot_state.bsoc_vddc_lock = false;
2781 }
2782
dd5a6fe2 2783 if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
f83a9991 2784 if (data->smu_features[GNLD_ACDC].supported) {
d3f8c0ab 2785 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2786 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2787 "Attempt to Enable DS_GFXCLK Feature Failed!",
2788 return -1);
2789 data->smu_features[GNLD_ACDC].enabled = true;
2790 }
2791 }
2792
2793 return 0;
2794}
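/*
 * Illustration: vega10_start_dpm()/vega10_stop_dpm() batch their requests
 * by OR-ing the per-feature bitmaps into a single mask and issuing one
 * enable/disable call.  A minimal sketch of that accumulation pattern
 * (hypothetical types and names, not driver code):
 */
#if 0	/* illustrative sketch only, not compiled */
struct example_feature {
	bool supported;
	bool enabled;
	uint32_t bitmap;
};

static uint32_t example_collect_features(struct example_feature *f,
		uint32_t count, uint32_t request_mask, bool target_state)
{
	uint32_t i, mask = 0;

	for (i = 0; i < count; i++) {
		if (!(f[i].bitmap & request_mask) || !f[i].supported)
			continue;
		if (f[i].enabled != target_state) {
			mask |= f[i].bitmap;	/* queue this feature */
			f[i].enabled = target_state;
		}
	}

	return mask;	/* caller sends one SMC message for the whole mask */
}
#endif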
2795
2796static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
2797{
690dc626 2798 struct vega10_hwmgr *data = hwmgr->backend;
2799
2800 if (data->smu_features[GNLD_PCC_LIMIT].supported) {
2801 if (enable == data->smu_features[GNLD_PCC_LIMIT].enabled)
2802 pr_info("GNLD_PCC_LIMIT has been %s\n", enable ? "enabled" : "disabled");
2803 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2804 enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap),
2805 "Attempt to Enable PCC Limit feature Failed!",
2806 return -EINVAL);
2807 data->smu_features[GNLD_PCC_LIMIT].enabled = enable;
2808 }
2809
2810 return 0;
2811}
2812
f83a9991
EH
2813static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2814{
690dc626 2815 struct vega10_hwmgr *data = hwmgr->backend;
2816 int tmp_result, result = 0;
2817
2818 vega10_enable_disable_PCC_limit_feature(hwmgr, true);
2819
d100033b 2820 if ((hwmgr->smu_version == 0x001c2c00) ||
2821 (hwmgr->smu_version == 0x001c2d00))
2822 smum_send_msg_to_smc_with_parameter(hwmgr,
b87079ec 2823 PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
b87079ec 2824
2825 smum_send_msg_to_smc_with_parameter(hwmgr,
2826 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2827
2828 tmp_result = vega10_construct_voltage_tables(hwmgr);
2829 PP_ASSERT_WITH_CODE(!tmp_result,
2830 "Failed to contruct voltage tables!",
2831 result = tmp_result);
2832
2833 tmp_result = vega10_init_smc_table(hwmgr);
2834 PP_ASSERT_WITH_CODE(!tmp_result,
2835 "Failed to initialize SMC table!",
2836 result = tmp_result);
2837
dd5a6fe2 2838 if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
2839 tmp_result = vega10_enable_thermal_protection(hwmgr);
2840 PP_ASSERT_WITH_CODE(!tmp_result,
2841 "Failed to enable thermal protection!",
2842 result = tmp_result);
2843 }
2844
2845 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2846 PP_ASSERT_WITH_CODE(!tmp_result,
2847 "Failed to enable VR hot feature!",
2848 result = tmp_result);
2849
2850 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2851 PP_ASSERT_WITH_CODE(!tmp_result,
2852 "Failed to enable deep sleep master switch!",
2853 result = tmp_result);
2854
2855 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2856 PP_ASSERT_WITH_CODE(!tmp_result,
2857 "Failed to start DPM!", result = tmp_result);
2858
2859 /* enable didt; do not abort if didt enablement fails */
2860 tmp_result = vega10_enable_didt_config(hwmgr);
2861 PP_ASSERT(!tmp_result,
2862 "Failed to enable didt config!");
2863
2864 tmp_result = vega10_enable_power_containment(hwmgr);
2865 PP_ASSERT_WITH_CODE(!tmp_result,
2866 "Failed to enable power containment!",
2867 result = tmp_result);
2868
2869 tmp_result = vega10_power_control_set_level(hwmgr);
2870 PP_ASSERT_WITH_CODE(!tmp_result,
2871 "Failed to power control set level!",
2872 result = tmp_result);
2873
2874 tmp_result = vega10_enable_ulv(hwmgr);
2875 PP_ASSERT_WITH_CODE(!tmp_result,
2876 "Failed to enable ULV!",
2877 result = tmp_result);
2878
2879 return result;
2880}
2881
2882static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2883{
2884 return sizeof(struct vega10_power_state);
2885}
2886
2887static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
2888 void *state, struct pp_power_state *power_state,
2889 void *pp_table, uint32_t classification_flag)
2890{
ebc1c9c1 2891 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
2892 struct vega10_power_state *vega10_power_state =
2893 cast_phw_vega10_power_state(&(power_state->hardware));
2894 struct vega10_performance_level *performance_level;
2895 ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
2896 ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
2897 (ATOM_Vega10_POWERPLAYTABLE *)pp_table;
2898 ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
2899 (ATOM_Vega10_SOCCLK_Dependency_Table *)
2900 (((unsigned long)powerplay_table) +
2901 le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
2902 ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
2903 (ATOM_Vega10_GFXCLK_Dependency_Table *)
2904 (((unsigned long)powerplay_table) +
2905 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
2906 ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
2907 (ATOM_Vega10_MCLK_Dependency_Table *)
2908 (((unsigned long)powerplay_table) +
2909 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2910
2911
2912 /* The following fields are not initialized here:
2913 * id orderedList allStatesList
2914 */
2915 power_state->classification.ui_label =
2916 (le16_to_cpu(state_entry->usClassification) &
2917 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2918 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2919 power_state->classification.flags = classification_flag;
2920 /* NOTE: There is a classification2 flag in BIOS
2921 * that is not being used right now
2922 */
2923 power_state->classification.temporary_state = false;
2924 power_state->classification.to_be_deleted = false;
2925
2926 power_state->validation.disallowOnDC =
2927 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2928 ATOM_Vega10_DISALLOW_ON_DC) != 0);
2929
2930 power_state->display.disableFrameModulation = false;
2931 power_state->display.limitRefreshrate = false;
2932 power_state->display.enableVariBright =
2933 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
2934 ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
2935
2936 power_state->validation.supportedPowerLevels = 0;
2937 power_state->uvd_clocks.VCLK = 0;
2938 power_state->uvd_clocks.DCLK = 0;
2939 power_state->temperatures.min = 0;
2940 power_state->temperatures.max = 0;
2941
2942 performance_level = &(vega10_power_state->performance_levels
2943 [vega10_power_state->performance_level_count++]);
2944
2945 PP_ASSERT_WITH_CODE(
2946 (vega10_power_state->performance_level_count <
2947 NUM_GFXCLK_DPM_LEVELS),
2948 "Performance levels exceeds SMC limit!",
2949 return -1);
2950
2951 PP_ASSERT_WITH_CODE(
2952 (vega10_power_state->performance_level_count <=
2953 hwmgr->platform_descriptor.
2954 hardwareActivityPerformanceLevels),
2955 "Performance levels exceeds Driver limit!",
2956 return -1);
2957
2958 /* Performance levels are arranged from low to high. */
2959 performance_level->soc_clock = socclk_dep_table->entries
2960 [state_entry->ucSocClockIndexLow].ulClk;
2961 performance_level->gfx_clock = gfxclk_dep_table->entries
2962 [state_entry->ucGfxClockIndexLow].ulClk;
2963 performance_level->mem_clock = mclk_dep_table->entries
2964 [state_entry->ucMemClockIndexLow].ulMemClk;
2965
2966 performance_level = &(vega10_power_state->performance_levels
2967 [vega10_power_state->performance_level_count++]);
f83a9991 2968 performance_level->soc_clock = socclk_dep_table->entries
2969 [state_entry->ucSocClockIndexHigh].ulClk;
2970 if (gfxclk_dep_table->ucRevId == 0) {
2971 performance_level->gfx_clock = gfxclk_dep_table->entries
f83a9991 2972 [state_entry->ucGfxClockIndexHigh].ulClk;
2973 } else if (gfxclk_dep_table->ucRevId == 1) {
2974 patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
2975 performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
2976 }
2977
2978 performance_level->mem_clock = mclk_dep_table->entries
2979 [state_entry->ucMemClockIndexHigh].ulMemClk;
2980 return 0;
2981}
2982
2983static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
2984 unsigned long entry_index, struct pp_power_state *state)
2985{
2986 int result;
2987 struct vega10_power_state *ps;
2988
2989 state->hardware.magic = PhwVega10_Magic;
2990
2991 ps = cast_phw_vega10_power_state(&state->hardware);
2992
2993 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
2994 vega10_get_pp_table_entry_callback_func);
2995
2996 /*
2997 * This is the earliest time we have all the dependency table
2998 * and the VBIOS boot state
2999 */
3000 /* set DC compatible flag if this state supports DC */
3001 if (!state->validation.disallowOnDC)
3002 ps->dc_compatible = true;
3003
3004 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3005 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3006
3007 return 0;
3008}
3009
3010static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
3011 struct pp_hw_power_state *hw_ps)
3012{
3013 return 0;
3014}
3015
3016static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3017 struct pp_power_state *request_ps,
3018 const struct pp_power_state *current_ps)
3019{
3020 struct vega10_power_state *vega10_ps =
3021 cast_phw_vega10_power_state(&request_ps->hardware);
3022 uint32_t sclk;
3023 uint32_t mclk;
3024 struct PP_Clocks minimum_clocks = {0};
3025 bool disable_mclk_switching;
3026 bool disable_mclk_switching_for_frame_lock;
3027 bool disable_mclk_switching_for_vr;
3028 bool force_mclk_high;
3029 const struct phm_clock_and_voltage_limits *max_limits;
3030 uint32_t i;
690dc626 3031 struct vega10_hwmgr *data = hwmgr->backend;
3032 struct phm_ppt_v2_information *table_info =
3033 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3034 int32_t count;
3035 uint32_t stable_pstate_sclk_dpm_percentage;
3036 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3037 uint32_t latency;
3038
3039 data->battery_state = (PP_StateUILabel_Battery ==
3040 request_ps->classification.ui_label);
3041
3042 if (vega10_ps->performance_level_count != 2)
3043 pr_info("Vega10 should always have 2 performance levels");
3044
3045 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
3046 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3047 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3048
3049 /* Cap clock DPM tables at DC MAX if it is in DC. */
3050 if (PP_PowerSource_DC == hwmgr->power_source) {
3051 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3052 if (vega10_ps->performance_levels[i].mem_clock >
3053 max_limits->mclk)
3054 vega10_ps->performance_levels[i].mem_clock =
3055 max_limits->mclk;
3056 if (vega10_ps->performance_levels[i].gfx_clock >
3057 max_limits->sclk)
3058 vega10_ps->performance_levels[i].gfx_clock =
3059 max_limits->sclk;
3060 }
3061 }
3062
f83a9991 3063 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3064 minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3065 minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
f83a9991 3066
dd5a6fe2 3067 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
3068 stable_pstate_sclk_dpm_percentage =
3069 data->registry_data.stable_pstate_sclk_dpm_percentage;
3070 PP_ASSERT_WITH_CODE(
3071 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
3072 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
3073 "percent sclk value must range from 1% to 100%, setting default value",
3074 stable_pstate_sclk_dpm_percentage = 75);
3075
3076 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3077 stable_pstate_sclk = (max_limits->sclk *
3078 stable_pstate_sclk_dpm_percentage) / 100;
3079
3080 for (count = table_info->vdd_dep_on_sclk->count - 1;
3081 count >= 0; count--) {
3082 if (stable_pstate_sclk >=
3083 table_info->vdd_dep_on_sclk->entries[count].clk) {
3084 stable_pstate_sclk =
3085 table_info->vdd_dep_on_sclk->entries[count].clk;
3086 break;
3087 }
3088 }
3089
3090 if (count < 0)
3091 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3092
3093 stable_pstate_mclk = max_limits->mclk;
3094
3095 minimum_clocks.engineClock = stable_pstate_sclk;
3096 minimum_clocks.memoryClock = stable_pstate_mclk;
3097 }
3098
6ce2d46c
AD
3099 disable_mclk_switching_for_frame_lock =
3100 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3101 disable_mclk_switching_for_vr =
3102 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
dd5a6fe2 3103 force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
f83a9991 3104
555fd70c 3105 if (hwmgr->display_config->num_display == 0)
d6bca7e7
AD
3106 disable_mclk_switching = false;
3107 else
555fd70c 3108 disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
d6bca7e7
AD
3109 disable_mclk_switching_for_frame_lock ||
3110 disable_mclk_switching_for_vr ||
3111 force_mclk_high;
f83a9991
EH
3112
3113 sclk = vega10_ps->performance_levels[0].gfx_clock;
3114 mclk = vega10_ps->performance_levels[0].mem_clock;
3115
3116 if (sclk < minimum_clocks.engineClock)
3117 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3118 max_limits->sclk : minimum_clocks.engineClock;
3119
3120 if (mclk < minimum_clocks.memoryClock)
3121 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3122 max_limits->mclk : minimum_clocks.memoryClock;
3123
3124 vega10_ps->performance_levels[0].gfx_clock = sclk;
3125 vega10_ps->performance_levels[0].mem_clock = mclk;
3126
d0856f3a
RZ
3127 if (vega10_ps->performance_levels[1].gfx_clock <
3128 vega10_ps->performance_levels[0].gfx_clock)
3129 vega10_ps->performance_levels[0].gfx_clock =
3130 vega10_ps->performance_levels[1].gfx_clock;
f83a9991
EH
3131
3132 if (disable_mclk_switching) {
3133 /* Set Mclk the max of level 0 and level 1 */
3134 if (mclk < vega10_ps->performance_levels[1].mem_clock)
3135 mclk = vega10_ps->performance_levels[1].mem_clock;
3136
3137 /* Find the lowest MCLK frequency that is within
3138 * the tolerable latency defined in DAL
3139 */
3140 latency = 0;
3141 for (i = 0; i < data->mclk_latency_table.count; i++) {
3142 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3143 (data->mclk_latency_table.entries[i].frequency >=
3144 vega10_ps->performance_levels[0].mem_clock) &&
3145 (data->mclk_latency_table.entries[i].frequency <=
3146 vega10_ps->performance_levels[1].mem_clock))
3147 mclk = data->mclk_latency_table.entries[i].frequency;
3148 }
3149 vega10_ps->performance_levels[0].mem_clock = mclk;
3150 } else {
3151 if (vega10_ps->performance_levels[1].mem_clock <
3152 vega10_ps->performance_levels[0].mem_clock)
d0856f3a
RZ
3153 vega10_ps->performance_levels[0].mem_clock =
3154 vega10_ps->performance_levels[1].mem_clock;
f83a9991
EH
3155 }
3156
dd5a6fe2 3157 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
f83a9991
EH
3158 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3159 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3160 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
3161 }
3162 }
3163
3164 return 0;
3165}
3166
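The level-0 adjustment just above is easy to invert by accident: the requested clock is only raised to the display-driven minimum, and that minimum is itself capped at the AC/DC limit for the current power source. A minimal user-space sketch of the rule (clamp_level0_clock() and its parameters are hypothetical names, not driver symbols):

#include <stdint.h>

/* Illustrative sketch: raise clk to at least min_clk, but never past max_clk. */
static uint32_t clamp_level0_clock(uint32_t clk, uint32_t min_clk, uint32_t max_clk)
{
	if (clk < min_clk)
		clk = (min_clk > max_clk) ? max_clk : min_clk;
	return clk;
}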
3167static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3168{
3169 const struct phm_set_power_state_input *states =
3170 (const struct phm_set_power_state_input *)input;
3171 const struct vega10_power_state *vega10_ps =
3172 cast_const_phw_vega10_power_state(states->pnew_state);
690dc626 3173 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3174 struct vega10_single_dpm_table *sclk_table =
3175 &(data->dpm_table.gfx_table);
3176 uint32_t sclk = vega10_ps->performance_levels
3177 [vega10_ps->performance_level_count - 1].gfx_clock;
3178 struct vega10_single_dpm_table *mclk_table =
3179 &(data->dpm_table.mem_table);
3180 uint32_t mclk = vega10_ps->performance_levels
3181 [vega10_ps->performance_level_count - 1].mem_clock;
3182 struct PP_Clocks min_clocks = {0};
3183 uint32_t i;
f83a9991
EH
3184
3185 data->need_update_dpm_table = 0;
3186
dd5a6fe2
TSD
3187 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
3188 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
f83a9991
EH
3189 for (i = 0; i < sclk_table->count; i++) {
3190 if (sclk == sclk_table->dpm_levels[i].value)
3191 break;
3192 }
3193
3194 if (!(data->apply_overdrive_next_settings_mask &
3195 DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
3196 /* Check SCLK in DAL's minimum clocks
3197 * in case DeepSleep divider update is required.
3198 */
3199 if (data->display_timing.min_clock_in_sr !=
3200 min_clocks.engineClockInSR &&
3201 (min_clocks.engineClockInSR >=
3202 VEGA10_MINIMUM_ENGINE_CLOCK ||
3203 data->display_timing.min_clock_in_sr >=
3204 VEGA10_MINIMUM_ENGINE_CLOCK))
3205 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3206 }
3207
f83a9991 3208 if (data->display_timing.num_existing_displays !=
555fd70c 3209 hwmgr->display_config->num_display)
f83a9991
EH
3210 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3211 } else {
3212 for (i = 0; i < sclk_table->count; i++) {
3213 if (sclk == sclk_table->dpm_levels[i].value)
3214 break;
3215 }
3216
3217 if (i >= sclk_table->count)
3218 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3219 else {
3220 /* Check SCLK in DAL's minimum clocks
3221 * in case DeepSleep divider update is required.
3222 */
3223 if (data->display_timing.min_clock_in_sr !=
3224 min_clocks.engineClockInSR &&
3225 (min_clocks.engineClockInSR >=
3226 VEGA10_MINIMUM_ENGINE_CLOCK ||
3227 data->display_timing.min_clock_in_sr >=
3228 VEGA10_MINIMUM_ENGINE_CLOCK))
3229 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3230 }
3231
3232 for (i = 0; i < mclk_table->count; i++) {
3233 if (mclk == mclk_table->dpm_levels[i].value)
3234 break;
3235 }
3236
f83a9991
EH
3237 if (i >= mclk_table->count)
3238 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3239
3240 if (data->display_timing.num_existing_displays !=
555fd70c 3241 hwmgr->display_config->num_display ||
f83a9991
EH
3242 i >= mclk_table->count)
3243 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3244 }
3245 return 0;
3246}
3247
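Both branches of the function above reduce to the same membership test: if the requested SCLK or MCLK matches no level in the current DPM table, the corresponding DPMTABLE_OD_UPDATE_* flag is set so the table gets repopulated. A standalone sketch of that test (clock_needs_table_update() is a hypothetical name):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch: true when clk matches no populated level value. */
static bool clock_needs_table_update(const uint32_t *levels, uint32_t count,
				     uint32_t clk)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		if (levels[i] == clk)
			return false;
	return true;
}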
3248static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3249 struct pp_hwmgr *hwmgr, const void *input)
3250{
3251 int result = 0;
3252 const struct phm_set_power_state_input *states =
3253 (const struct phm_set_power_state_input *)input;
3254 const struct vega10_power_state *vega10_ps =
3255 cast_const_phw_vega10_power_state(states->pnew_state);
690dc626 3256 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3257 uint32_t sclk = vega10_ps->performance_levels
3258 [vega10_ps->performance_level_count - 1].gfx_clock;
3259 uint32_t mclk = vega10_ps->performance_levels
3260 [vega10_ps->performance_level_count - 1].mem_clock;
3261 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3262 struct vega10_dpm_table *golden_dpm_table =
3263 &data->golden_dpm_table;
3264 uint32_t dpm_count, clock_percent;
3265 uint32_t i;
3266
dd5a6fe2
TSD
3267 if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
3268 PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
f83a9991
EH
3269
3270 if (!data->need_update_dpm_table &&
3271 !data->apply_optimized_settings &&
3272 !data->apply_overdrive_next_settings_mask)
3273 return 0;
3274
3275 if (data->apply_overdrive_next_settings_mask &
3276 DPMTABLE_OD_UPDATE_SCLK) {
3277 for (dpm_count = 0;
3278 dpm_count < dpm_table->gfx_table.count;
3279 dpm_count++) {
3280 dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
4efe9b47 3281 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].enabled;
f83a9991 3282 dpm_table->gfx_table.dpm_levels[dpm_count].value =
4efe9b47 3283 data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].clock;
f83a9991
EH
3284 }
3285 }
3286
3287 if (data->apply_overdrive_next_settings_mask &
3288 DPMTABLE_OD_UPDATE_MCLK) {
3289 for (dpm_count = 0;
3290 dpm_count < dpm_table->mem_table.count;
3291 dpm_count++) {
3292 dpm_table->mem_table.dpm_levels[dpm_count].enabled =
4efe9b47 3293 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].enabled;
f83a9991 3294 dpm_table->mem_table.dpm_levels[dpm_count].value =
4efe9b47 3295 data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].clock;
f83a9991
EH
3296 }
3297 }
3298
3299 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
3300 data->apply_optimized_settings ||
3301 (data->apply_overdrive_next_settings_mask &
3302 DPMTABLE_OD_UPDATE_SCLK)) {
3303 result = vega10_populate_all_graphic_levels(hwmgr);
3304 PP_ASSERT_WITH_CODE(!result,
4f42a2dd 3305 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
f83a9991
EH
3306 return result);
3307 }
3308
3309 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
3310 (data->apply_overdrive_next_settings_mask &
3311 DPMTABLE_OD_UPDATE_MCLK)){
3312 result = vega10_populate_all_memory_levels(hwmgr);
3313 PP_ASSERT_WITH_CODE(!result,
4f42a2dd 3314 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
f83a9991
EH
3315 return result);
3316 }
3317 } else {
3318 if (!data->need_update_dpm_table &&
3319 !data->apply_optimized_settings)
3320 return 0;
3321
3322 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
3323 data->smu_features[GNLD_DPM_GFXCLK].supported) {
3324 			dpm_table->gfx_table.dpm_levels
3325 				[dpm_table->gfx_table.count - 1].value =
3326 					sclk;
11f64ff5 3327 if (hwmgr->od_enabled) {
f83a9991
EH
3328 /* Need to do calculation based on the golden DPM table
3329 * as the Heatmap GPU Clock axis is also based on
3330 * the default values
3331 */
3332 PP_ASSERT_WITH_CODE(
3333 golden_dpm_table->gfx_table.dpm_levels
3334 [golden_dpm_table->gfx_table.count - 1].value,
3335 "Divide by 0!",
3336 return -1);
3337
3338 dpm_count = dpm_table->gfx_table.count < 2 ?
3339 0 : dpm_table->gfx_table.count - 2;
3340 for (i = dpm_count; i > 1; i--) {
3341 if (sclk > golden_dpm_table->gfx_table.dpm_levels
3342 [golden_dpm_table->gfx_table.count - 1].value) {
3343 clock_percent =
3344 ((sclk - golden_dpm_table->gfx_table.dpm_levels
3345 [golden_dpm_table->gfx_table.count - 1].value) *
3346 100) /
3347 golden_dpm_table->gfx_table.dpm_levels
3348 [golden_dpm_table->gfx_table.count - 1].value;
3349
3350 dpm_table->gfx_table.dpm_levels[i].value =
3351 golden_dpm_table->gfx_table.dpm_levels[i].value +
3352 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3353 clock_percent) / 100;
3354 } else if (golden_dpm_table->
3355 gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
3356 sclk) {
3357 clock_percent =
3358 ((golden_dpm_table->gfx_table.dpm_levels
3359 [golden_dpm_table->gfx_table.count - 1].value -
3360 sclk) * 100) /
3361 golden_dpm_table->gfx_table.dpm_levels
3362 [golden_dpm_table->gfx_table.count-1].value;
3363
3364 dpm_table->gfx_table.dpm_levels[i].value =
3365 golden_dpm_table->gfx_table.dpm_levels[i].value -
3366 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3367 clock_percent) / 100;
3368 } else
3369 dpm_table->gfx_table.dpm_levels[i].value =
3370 golden_dpm_table->gfx_table.dpm_levels[i].value;
3371 }
3372 }
3373 }
3374
3375 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
3376 data->smu_features[GNLD_DPM_UCLK].supported) {
3377 			dpm_table->mem_table.dpm_levels
3378 				[dpm_table->mem_table.count - 1].value =
3379 					mclk;
3380
11f64ff5 3381 if (hwmgr->od_enabled) {
f83a9991
EH
3382 PP_ASSERT_WITH_CODE(
3383 golden_dpm_table->mem_table.dpm_levels
3384 [golden_dpm_table->mem_table.count - 1].value,
3385 "Divide by 0!",
3386 return -1);
3387
3388 dpm_count = dpm_table->mem_table.count < 2 ?
3389 0 : dpm_table->mem_table.count - 2;
3390 for (i = dpm_count; i > 1; i--) {
3391 if (mclk > golden_dpm_table->mem_table.dpm_levels
3392 [golden_dpm_table->mem_table.count-1].value) {
3393 clock_percent = ((mclk -
3394 golden_dpm_table->mem_table.dpm_levels
3395 [golden_dpm_table->mem_table.count-1].value) *
3396 100) /
3397 golden_dpm_table->mem_table.dpm_levels
3398 [golden_dpm_table->mem_table.count-1].value;
3399
3400 dpm_table->mem_table.dpm_levels[i].value =
3401 golden_dpm_table->mem_table.dpm_levels[i].value +
3402 (golden_dpm_table->mem_table.dpm_levels[i].value *
3403 clock_percent) / 100;
3404 } else if (golden_dpm_table->mem_table.dpm_levels
3405 [dpm_table->mem_table.count-1].value > mclk) {
3406 clock_percent = ((golden_dpm_table->mem_table.dpm_levels
3407 [golden_dpm_table->mem_table.count-1].value - mclk) *
3408 100) /
3409 golden_dpm_table->mem_table.dpm_levels
3410 [golden_dpm_table->mem_table.count-1].value;
3411
3412 dpm_table->mem_table.dpm_levels[i].value =
3413 golden_dpm_table->mem_table.dpm_levels[i].value -
3414 (golden_dpm_table->mem_table.dpm_levels[i].value *
3415 clock_percent) / 100;
3416 } else
3417 dpm_table->mem_table.dpm_levels[i].value =
3418 golden_dpm_table->mem_table.dpm_levels[i].value;
3419 }
3420 }
3421 }
3422
3423 if ((data->need_update_dpm_table &
3424 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
3425 data->apply_optimized_settings) {
3426 result = vega10_populate_all_graphic_levels(hwmgr);
3427 PP_ASSERT_WITH_CODE(!result,
4f42a2dd 3428 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
f83a9991
EH
3429 return result);
3430 }
3431
3432 if (data->need_update_dpm_table &
3433 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3434 result = vega10_populate_all_memory_levels(hwmgr);
3435 PP_ASSERT_WITH_CODE(!result,
4f42a2dd 3436 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
f83a9991
EH
3437 return result);
3438 }
3439 }
f83a9991
EH
3440 return result;
3441}
3442
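The intermediate-level rescaling in the overdrive path above keeps the shape of the golden (default) DPM curve: every middle level moves by the same percentage the top level moved away from its golden value. A standalone sketch of the arithmetic over plain arrays (rescale_intermediate_levels() is a hypothetical name; integer division truncates just as in the driver, and levels 0 and 1 are deliberately left untouched):

#include <stdint.h>

static void rescale_intermediate_levels(uint32_t *levels, const uint32_t *golden,
					uint32_t count, uint32_t new_top)
{
	uint32_t golden_top, percent, i;
	uint32_t start = (count < 2) ? 0 : count - 2;

	if (count == 0 || golden[count - 1] == 0)
		return;			/* divide-by-zero guard */

	golden_top = golden[count - 1];
	levels[count - 1] = new_top;

	/* Walk the middle levels, highest first; levels 0 and 1 stay as-is. */
	for (i = start; i > 1; i--) {
		if (new_top > golden_top) {
			percent = (new_top - golden_top) * 100 / golden_top;
			levels[i] = golden[i] + golden[i] * percent / 100;
		} else if (new_top < golden_top) {
			percent = (golden_top - new_top) * 100 / golden_top;
			levels[i] = golden[i] - golden[i] * percent / 100;
		} else {
			levels[i] = golden[i];
		}
	}
}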
3443static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3444 struct vega10_single_dpm_table *dpm_table,
3445 uint32_t low_limit, uint32_t high_limit)
3446{
3447 uint32_t i;
3448
3449 for (i = 0; i < dpm_table->count; i++) {
3450 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3451 (dpm_table->dpm_levels[i].value > high_limit))
3452 dpm_table->dpm_levels[i].enabled = false;
3453 else
3454 dpm_table->dpm_levels[i].enabled = true;
3455 }
3456 return 0;
3457}
3458
3459static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3460 struct vega10_single_dpm_table *dpm_table,
3461 uint32_t low_limit, uint32_t high_limit,
3462 uint32_t disable_dpm_mask)
3463{
3464 uint32_t i;
3465
3466 for (i = 0; i < dpm_table->count; i++) {
3467 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3468 (dpm_table->dpm_levels[i].value > high_limit))
3469 dpm_table->dpm_levels[i].enabled = false;
3470 else if (!((1 << i) & disable_dpm_mask))
3471 dpm_table->dpm_levels[i].enabled = false;
3472 else
3473 dpm_table->dpm_levels[i].enabled = true;
3474 }
3475 return 0;
3476}
3477
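The two trim helpers above differ only in whether a per-level mask is consulted; note that a level survives only when its bit in disable_dpm_mask is set, so the field effectively acts as an enable mask here despite its name. A compact sketch of the combined decision (level_stays_enabled() is a hypothetical name):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch: enabled iff value lies in [low, high] and bit i of mask is set. */
static bool level_stays_enabled(uint32_t value, uint32_t low, uint32_t high,
				uint32_t i, uint32_t mask)
{
	if (value < low || value > high)
		return false;
	return (mask >> i) & 1;
}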
3478static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3479 const struct vega10_power_state *vega10_ps)
3480{
690dc626 3481 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3482 uint32_t high_limit_count;
3483
3484 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3485 "power state did not have any performance level",
3486 return -1);
3487
3488 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3489
3490 vega10_trim_single_dpm_states(hwmgr,
3491 &(data->dpm_table.soc_table),
3492 vega10_ps->performance_levels[0].soc_clock,
3493 vega10_ps->performance_levels[high_limit_count].soc_clock);
3494
3495 vega10_trim_single_dpm_states_with_mask(hwmgr,
3496 &(data->dpm_table.gfx_table),
3497 vega10_ps->performance_levels[0].gfx_clock,
3498 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3499 data->disable_dpm_mask);
3500
3501 vega10_trim_single_dpm_states(hwmgr,
3502 &(data->dpm_table.mem_table),
3503 vega10_ps->performance_levels[0].mem_clock,
3504 vega10_ps->performance_levels[high_limit_count].mem_clock);
3505
3506 return 0;
3507}
3508
3509static uint32_t vega10_find_lowest_dpm_level(
3510 struct vega10_single_dpm_table *table)
3511{
3512 uint32_t i;
3513
3514 for (i = 0; i < table->count; i++) {
3515 if (table->dpm_levels[i].enabled)
3516 break;
3517 }
3518
3519 return i;
3520}
3521
3522static uint32_t vega10_find_highest_dpm_level(
3523 struct vega10_single_dpm_table *table)
3524{
3525 uint32_t i = 0;
3526
3527 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3528 for (i = table->count; i > 0; i--) {
3529 if (table->dpm_levels[i - 1].enabled)
3530 return i - 1;
3531 }
3532 } else {
3533 pr_info("DPM Table Has Too Many Entries!");
3534 return MAX_REGULAR_DPM_NUMBER - 1;
3535 }
3536
3537 return i;
3538}
3539
3540static void vega10_apply_dal_minimum_voltage_request(
3541 struct pp_hwmgr *hwmgr)
3542{
3543 return;
3544}
3545
3d4d4fd0
RZ
3546static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
3547{
3548 struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
3549 struct phm_ppt_v2_information *table_info =
3550 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3551
3552 vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;
3553
3554 return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
3555}
3556
f83a9991
EH
3557static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3558{
690dc626 3559 struct vega10_hwmgr *data = hwmgr->backend;
3d4d4fd0 3560 uint32_t socclk_idx;
f83a9991
EH
3561
3562 vega10_apply_dal_minimum_voltage_request(hwmgr);
3563
3564 if (!data->registry_data.sclk_dpm_key_disabled) {
3565 if (data->smc_state_table.gfx_boot_level !=
3566 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
d246cd53 3567 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991 3568 PPSMC_MSG_SetSoftMinGfxclkByIndex,
d246cd53 3569 data->smc_state_table.gfx_boot_level);
f83a9991
EH
3570 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3571 data->smc_state_table.gfx_boot_level;
3572 }
3573 }
3574
3575 if (!data->registry_data.mclk_dpm_key_disabled) {
3576 if (data->smc_state_table.mem_boot_level !=
3577 data->dpm_table.mem_table.dpm_state.soft_min_level) {
3d4d4fd0
RZ
3578 if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
3579 socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
d246cd53 3580 smum_send_msg_to_smc_with_parameter(hwmgr,
3d4d4fd0 3581 PPSMC_MSG_SetSoftMinSocclkByIndex,
d246cd53 3582 socclk_idx);
3d4d4fd0 3583 } else {
d246cd53 3584 smum_send_msg_to_smc_with_parameter(hwmgr,
3d4d4fd0 3585 PPSMC_MSG_SetSoftMinUclkByIndex,
d246cd53 3586 data->smc_state_table.mem_boot_level);
3d4d4fd0 3587 }
f83a9991
EH
3588 data->dpm_table.mem_table.dpm_state.soft_min_level =
3589 data->smc_state_table.mem_boot_level;
3590 }
3591 }
3592
3593 return 0;
3594}
3595
3596static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3597{
690dc626 3598 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3599
3600 vega10_apply_dal_minimum_voltage_request(hwmgr);
3601
3602 if (!data->registry_data.sclk_dpm_key_disabled) {
3603 if (data->smc_state_table.gfx_max_level !=
d246cd53
RZ
3604 data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3605 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991 3606 PPSMC_MSG_SetSoftMaxGfxclkByIndex,
d246cd53 3607 data->smc_state_table.gfx_max_level);
f83a9991
EH
3608 data->dpm_table.gfx_table.dpm_state.soft_max_level =
3609 data->smc_state_table.gfx_max_level;
3610 }
3611 }
3612
3613 if (!data->registry_data.mclk_dpm_key_disabled) {
3614 if (data->smc_state_table.mem_max_level !=
d246cd53
RZ
3615 data->dpm_table.mem_table.dpm_state.soft_max_level) {
3616 smum_send_msg_to_smc_with_parameter(hwmgr,
3617 PPSMC_MSG_SetSoftMaxUclkByIndex,
3618 data->smc_state_table.mem_max_level);
f83a9991
EH
3619 data->dpm_table.mem_table.dpm_state.soft_max_level =
3620 data->smc_state_table.mem_max_level;
3621 }
3622 }
3623
3624 return 0;
3625}
3626
3627static int vega10_generate_dpm_level_enable_mask(
3628 struct pp_hwmgr *hwmgr, const void *input)
3629{
690dc626 3630 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3631 const struct phm_set_power_state_input *states =
3632 (const struct phm_set_power_state_input *)input;
3633 const struct vega10_power_state *vega10_ps =
3634 cast_const_phw_vega10_power_state(states->pnew_state);
3635 int i;
3636
3637 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3638 "Attempt to Trim DPM States Failed!",
3639 return -1);
3640
3641 data->smc_state_table.gfx_boot_level =
3642 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3643 data->smc_state_table.gfx_max_level =
3644 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3645 data->smc_state_table.mem_boot_level =
3646 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3647 data->smc_state_table.mem_max_level =
3648 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3649
3650 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3651 "Attempt to upload DPM Bootup Levels Failed!",
3652 return -1);
3653 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3654 "Attempt to upload DPM Max Levels Failed!",
3655 return -1);
3656 	for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3657 		data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3658
3659
3660 	for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3661 		data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3662
3663 return 0;
3664}
3665
3666int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3667{
690dc626 3668 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3669
3670 if (data->smu_features[GNLD_DPM_VCE].supported) {
d3f8c0ab 3671 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991
EH
3672 enable,
3673 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3674 "Attempt to Enable/Disable DPM VCE Failed!",
3675 return -1);
3676 data->smu_features[GNLD_DPM_VCE].enabled = enable;
3677 }
3678
3679 return 0;
3680}
3681
3682static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3683{
690dc626 3684 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3685 uint32_t low_sclk_interrupt_threshold = 0;
3686
dd5a6fe2 3687 if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
29411f05 3688 (data->low_sclk_interrupt_threshold != 0)) {
f83a9991
EH
3689 low_sclk_interrupt_threshold =
3690 data->low_sclk_interrupt_threshold;
3691
3692 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3693 cpu_to_le32(low_sclk_interrupt_threshold);
3694
3695 /* This message will also enable SmcToHost Interrupt */
d246cd53 3696 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991
EH
3697 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3698 (uint32_t)low_sclk_interrupt_threshold);
3699 }
3700
d246cd53 3701 return 0;
f83a9991
EH
3702}
3703
3704static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3705 const void *input)
3706{
3707 int tmp_result, result = 0;
690dc626 3708 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3709 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3710
3711 tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3712 PP_ASSERT_WITH_CODE(!tmp_result,
3713 "Failed to find DPM states clocks in DPM table!",
3714 result = tmp_result);
3715
3716 tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3717 PP_ASSERT_WITH_CODE(!tmp_result,
3718 "Failed to populate and upload SCLK MCLK DPM levels!",
3719 result = tmp_result);
3720
3721 tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3722 PP_ASSERT_WITH_CODE(!tmp_result,
3723 "Failed to generate DPM level enabled mask!",
3724 result = tmp_result);
3725
3726 tmp_result = vega10_update_sclk_threshold(hwmgr);
3727 PP_ASSERT_WITH_CODE(!tmp_result,
3728 "Failed to update SCLK threshold!",
3729 result = tmp_result);
3730
3f9ca14a 3731 result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
f83a9991
EH
3732 PP_ASSERT_WITH_CODE(!result,
3733 "Failed to upload PPtable!", return result);
3734
3735 data->apply_optimized_settings = false;
3736 data->apply_overdrive_next_settings_mask = 0;
3737
3738 return 0;
3739}
3740
f93f0c3a 3741static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
f83a9991
EH
3742{
3743 struct pp_power_state *ps;
3744 struct vega10_power_state *vega10_ps;
3745
3746 if (hwmgr == NULL)
3747 return -EINVAL;
3748
3749 ps = hwmgr->request_ps;
3750
3751 if (ps == NULL)
3752 return -EINVAL;
3753
3754 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3755
3756 if (low)
3757 return vega10_ps->performance_levels[0].gfx_clock;
3758 else
3759 return vega10_ps->performance_levels
3760 [vega10_ps->performance_level_count - 1].gfx_clock;
3761}
3762
f93f0c3a 3763static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
f83a9991
EH
3764{
3765 struct pp_power_state *ps;
3766 struct vega10_power_state *vega10_ps;
3767
3768 if (hwmgr == NULL)
3769 return -EINVAL;
3770
3771 ps = hwmgr->request_ps;
3772
3773 if (ps == NULL)
3774 return -EINVAL;
3775
3776 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3777
3778 if (low)
3779 return vega10_ps->performance_levels[0].mem_clock;
3780 else
3781 return vega10_ps->performance_levels
3782 [vega10_ps->performance_level_count-1].mem_clock;
3783}
3784
17d176a5
EH
3785static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
3786 struct pp_gpu_power *query)
3787{
6b5defd6
EH
3788 uint32_t value;
3789
d246cd53 3790 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
3f9ca14a 3791 value = smum_get_argument(hwmgr);
fda519fb 3792
6b5defd6 3793 /* power value is an integer */
fda519fb 3794 memset(query, 0, sizeof *query);
6b5defd6
EH
3795 query->average_gpu_power = value << 8;
3796
3797 return 0;
17d176a5
EH
3798}
3799
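The shift by 8 above turns the SMU's integer watt reading into what looks like a 24.8 fixed-point value in struct pp_gpu_power. A user-space sketch of converting such a value back for display; the 24.8 interpretation and gpu_power_to_milliwatts() are assumptions, not something this file states:

#include <stdint.h>

static uint32_t gpu_power_to_milliwatts(uint32_t average_gpu_power)
{
	uint32_t watts = average_gpu_power >> 8;	/* integer part, in watts */
	uint32_t frac = average_gpu_power & 0xff;	/* fractional part, 1/256 W steps */

	return watts * 1000 + frac * 1000 / 256;
}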
f83a9991
EH
3800static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3801 void *value, int *size)
3802{
b8a55591 3803 struct amdgpu_device *adev = hwmgr->adev;
c11d8afe 3804 uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
690dc626 3805 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3806 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3807 int ret = 0;
b8a55591 3808 uint32_t val_vid;
f83a9991
EH
3809
3810 switch (idx) {
3811 case AMDGPU_PP_SENSOR_GFX_SCLK:
c11d8afe
EQ
3812 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
3813 sclk_mhz = smum_get_argument(hwmgr);
3814 *((uint32_t *)value) = sclk_mhz * 100;
f83a9991
EH
3815 break;
3816 case AMDGPU_PP_SENSOR_GFX_MCLK:
952e5daa 3817 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
3f9ca14a 3818 mclk_idx = smum_get_argument(hwmgr);
952e5daa 3819 if (mclk_idx < dpm_table->mem_table.count) {
f83a9991
EH
3820 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3821 *size = 4;
952e5daa
RZ
3822 } else {
3823 ret = -EINVAL;
f83a9991
EH
3824 }
3825 break;
3826 case AMDGPU_PP_SENSOR_GPU_LOAD:
952e5daa 3827 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3f9ca14a 3828 activity_percent = smum_get_argument(hwmgr);
952e5daa
RZ
3829 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3830 *size = 4;
f83a9991
EH
3831 break;
3832 case AMDGPU_PP_SENSOR_GPU_TEMP:
3833 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
3834 *size = 4;
3835 break;
3836 case AMDGPU_PP_SENSOR_UVD_POWER:
3837 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3838 *size = 4;
3839 break;
3840 case AMDGPU_PP_SENSOR_VCE_POWER:
3841 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3842 *size = 4;
3843 break;
17d176a5
EH
3844 case AMDGPU_PP_SENSOR_GPU_POWER:
3845 if (*size < sizeof(struct pp_gpu_power))
3846 ret = -EINVAL;
3847 else {
3848 *size = sizeof(struct pp_gpu_power);
3849 ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
3850 }
3851 break;
59655cb6 3852 case AMDGPU_PP_SENSOR_VDDGFX:
b8a55591 3853 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
59655cb6
RZ
3854 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
3855 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
3856 *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
3857 return 0;
f83a9991
EH
3858 default:
3859 ret = -EINVAL;
3860 break;
3861 }
6390258a 3862
f83a9991
EH
3863 return ret;
3864}
3865
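For AMDGPU_PP_SENSOR_VDDGFX the raw SVI0 plane VID is extracted with the mask/shift pair above and handed to convert_to_vddc(). A common SVI2-style decoding is 1.55 V minus 6.25 mV per VID step; the sketch below assumes that encoding, and vid_to_mv() is a hypothetical helper rather than the driver's convert_to_vddc():

#include <stdint.h>

/* Illustrative sketch, assuming SVI2 encoding: V = 1.55 V - vid * 6.25 mV. */
static uint32_t vid_to_mv(uint8_t vid)
{
	return (6200u - 25u * (uint32_t)vid) / 4u;	/* millivolts */
}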
d246cd53 3866static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
f83a9991
EH
3867 bool has_disp)
3868{
d246cd53 3869 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991
EH
3870 PPSMC_MSG_SetUclkFastSwitch,
3871 has_disp ? 0 : 1);
3872}
3873
3874int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3875 struct pp_display_clock_request *clock_req)
3876{
3877 int result = 0;
3878 enum amd_pp_clock_type clk_type = clock_req->clock_type;
75f0e32b 3879 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
f83a9991
EH
3880 DSPCLK_e clk_select = 0;
3881 uint32_t clk_request = 0;
3882
3883 switch (clk_type) {
3884 case amd_pp_dcef_clock:
3885 clk_select = DSPCLK_DCEFCLK;
3886 break;
3887 case amd_pp_disp_clock:
3888 clk_select = DSPCLK_DISPCLK;
3889 break;
3890 case amd_pp_pixel_clock:
3891 clk_select = DSPCLK_PIXCLK;
3892 break;
3893 case amd_pp_phy_clock:
3894 clk_select = DSPCLK_PHYCLK;
3895 break;
3896 default:
3897 		pr_info("[DisplayClockVoltageRequest] Invalid Clock Type!");
3898 result = -1;
3899 break;
3900 }
3901
3902 if (!result) {
3903 clk_request = (clk_freq << 16) | clk_select;
d246cd53 3904 smum_send_msg_to_smc_with_parameter(hwmgr,
f83a9991
EH
3905 PPSMC_MSG_RequestDisplayClockByFreq,
3906 clk_request);
3907 }
3908
3909 return result;
3910}
3911
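The request word built above packs the frequency, already reduced from kHz to MHz, into the upper 16 bits and the DSPCLK_e selector into the lower 16 bits. A tiny sketch of that packing (pack_display_clock_request() is a hypothetical name; the explicit low-16 mask is defensive and not present in the driver):

#include <stdint.h>

static uint32_t pack_display_clock_request(uint32_t freq_mhz, uint32_t clk_select)
{
	return (freq_mhz << 16) | (clk_select & 0xffffu);
}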
75f0e32b
RZ
3912static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
3913 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
3914 uint32_t frequency)
3915{
3916 uint8_t count;
3917 uint8_t i;
3918
3919 if (mclk_table == NULL || mclk_table->count == 0)
3920 return 0;
3921
3922 count = (uint8_t)(mclk_table->count);
3923
3924 	for (i = 0; i < count; i++) {
3925 		if (mclk_table->entries[i].clk >= frequency)
3926 			return i;
3927 	}
3928
3929 	return i - 1;
3930}
3931
f83a9991
EH
3932static int vega10_notify_smc_display_config_after_ps_adjustment(
3933 struct pp_hwmgr *hwmgr)
3934{
690dc626 3935 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3936 struct vega10_single_dpm_table *dpm_table =
3937 &data->dpm_table.dcef_table;
75f0e32b
RZ
3938 struct phm_ppt_v2_information *table_info =
3939 (struct phm_ppt_v2_information *)hwmgr->pptable;
3940 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
3941 uint32_t idx;
f83a9991
EH
3942 struct PP_Clocks min_clocks = {0};
3943 uint32_t i;
3944 struct pp_display_clock_request clock_req;
3945
555fd70c 3946 if (hwmgr->display_config->num_display > 1)
f83a9991
EH
3947 vega10_notify_smc_display_change(hwmgr, false);
3948 else
3949 vega10_notify_smc_display_change(hwmgr, true);
3950
555fd70c
RZ
3951 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
3952 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
3953 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
f83a9991
EH
3954
3955 for (i = 0; i < dpm_table->count; i++) {
3956 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
3957 break;
3958 }
3959
3960 if (i < dpm_table->count) {
3961 clock_req.clock_type = amd_pp_dcef_clock;
3962 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
3963 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
d246cd53 3964 smum_send_msg_to_smc_with_parameter(
d3f8c0ab 3965 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
d246cd53 3966 min_clocks.dcefClockInSR / 100);
75f0e32b 3967 } else {
f83a9991 3968 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
75f0e32b
RZ
3969 }
3970 } else {
5bbc5c64 3971 pr_debug("Cannot find requested DCEFCLK!");
75f0e32b
RZ
3972 }
3973
3974 if (min_clocks.memoryClock != 0) {
3975 idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
d3f8c0ab 3976 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
75f0e32b
RZ
3977 		data->dpm_table.mem_table.dpm_state.soft_min_level = idx;
3978 }
f83a9991
EH
3979
3980 return 0;
3981}
3982
3983static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
3984{
690dc626 3985 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
3986
3987 data->smc_state_table.gfx_boot_level =
3988 data->smc_state_table.gfx_max_level =
3989 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3990 data->smc_state_table.mem_boot_level =
3991 data->smc_state_table.mem_max_level =
3992 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3993
3994 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3995 "Failed to upload boot level to highest!",
3996 return -1);
3997
3998 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3999 "Failed to upload dpm max level to highest!",
4000 return -1);
4001
4002 return 0;
4003}
4004
4005static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
4006{
690dc626 4007 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4008
4009 data->smc_state_table.gfx_boot_level =
4010 data->smc_state_table.gfx_max_level =
4011 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4012 data->smc_state_table.mem_boot_level =
4013 data->smc_state_table.mem_max_level =
4014 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4015
4016 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4017 			"Failed to upload boot level to lowest!",
4018 return -1);
4019
4020 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4021 			"Failed to upload dpm max level to lowest!",
4022 return -1);
4023
4024 return 0;
4025
4026}
4027
4028static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
4029{
690dc626 4030 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4031
4032 data->smc_state_table.gfx_boot_level =
4033 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4034 data->smc_state_table.gfx_max_level =
4035 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4036 data->smc_state_table.mem_boot_level =
4037 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4038 data->smc_state_table.mem_max_level =
4039 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4040
4041 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4042 "Failed to upload DPM Bootup Levels!",
4043 return -1);
4044
4045 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4046 "Failed to upload DPM Max Levels!",
4047 return -1);
4048 return 0;
4049}
4050
53a4b90d
RZ
4051static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
4052 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
f83a9991 4053{
53a4b90d
RZ
4054 struct phm_ppt_v2_information *table_info =
4055 (struct phm_ppt_v2_information *)(hwmgr->pptable);
f83a9991 4056
53a4b90d
RZ
4057 if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
4058 table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
4059 table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
4060 *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
4061 *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
4062 *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
dd70949d
RZ
4063 hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
4064 hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
f83a9991
EH
4065 }
4066
53a4b90d
RZ
4067 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
4068 *sclk_mask = 0;
4069 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
4070 *mclk_mask = 0;
4071 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
4072 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
4073 *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
4074 *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
4075 }
4076 return 0;
f83a9991
EH
4077}
4078
f93f0c3a 4079static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
f83a9991 4080{
7522ffc4
RZ
4081 switch (mode) {
4082 case AMD_FAN_CTRL_NONE:
f93f0c3a 4083 vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
7522ffc4
RZ
4084 break;
4085 case AMD_FAN_CTRL_MANUAL:
dd5a6fe2 4086 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
f93f0c3a 4087 vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
7522ffc4
RZ
4088 break;
4089 case AMD_FAN_CTRL_AUTO:
710931c2 4090 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
f93f0c3a 4091 vega10_fan_ctrl_start_smc_fan_control(hwmgr);
7522ffc4
RZ
4092 break;
4093 default:
4094 break;
4095 }
f83a9991
EH
4096}
4097
53a4b90d
RZ
4098static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4099 enum amd_dpm_forced_level level)
4100{
4101 int ret = 0;
4102 uint32_t sclk_mask = 0;
4103 uint32_t mclk_mask = 0;
4104 uint32_t soc_mask = 0;
53a4b90d 4105
dd70949d
RZ
4106 if (hwmgr->pstate_sclk == 0)
4107 vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4108
53a4b90d
RZ
4109 switch (level) {
4110 case AMD_DPM_FORCED_LEVEL_HIGH:
4111 ret = vega10_force_dpm_highest(hwmgr);
53a4b90d
RZ
4112 break;
4113 case AMD_DPM_FORCED_LEVEL_LOW:
4114 ret = vega10_force_dpm_lowest(hwmgr);
53a4b90d
RZ
4115 break;
4116 case AMD_DPM_FORCED_LEVEL_AUTO:
4117 ret = vega10_unforce_dpm_levels(hwmgr);
53a4b90d
RZ
4118 break;
4119 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
4120 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
4121 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
4122 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
4123 ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4124 if (ret)
4125 return ret;
53a4b90d
RZ
4126 vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
4127 vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
4128 break;
4129 case AMD_DPM_FORCED_LEVEL_MANUAL:
53a4b90d
RZ
4130 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
4131 default:
4132 break;
4133 }
4134
9947f704
RZ
4135 if (!ret) {
4136 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4137 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
4138 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4139 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
4140 }
9ac870c7 4141
9947f704 4142 return ret;
53a4b90d
RZ
4143}
4144
f93f0c3a 4145static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
f83a9991 4146{
690dc626 4147 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 4148
7522ffc4
RZ
4149 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
4150 return AMD_FAN_CTRL_MANUAL;
4151 else
4152 return AMD_FAN_CTRL_AUTO;
f83a9991
EH
4153}
4154
4155static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4156 struct amd_pp_simple_clock_info *info)
4157{
4158 struct phm_ppt_v2_information *table_info =
4159 (struct phm_ppt_v2_information *)hwmgr->pptable;
4160 struct phm_clock_and_voltage_limits *max_limits =
4161 &table_info->max_clock_voltage_on_ac;
4162
4163 info->engine_max_clock = max_limits->sclk;
4164 info->memory_max_clock = max_limits->mclk;
4165
4166 return 0;
4167}
4168
4169static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4170 struct pp_clock_levels_with_latency *clocks)
4171{
4172 struct phm_ppt_v2_information *table_info =
4173 (struct phm_ppt_v2_information *)hwmgr->pptable;
4174 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4175 table_info->vdd_dep_on_sclk;
4176 uint32_t i;
4177
4178 for (i = 0; i < dep_table->count; i++) {
4179 if (dep_table->entries[i].clk) {
4180 clocks->data[clocks->num_levels].clocks_in_khz =
4181 dep_table->entries[i].clk;
4182 clocks->num_levels++;
4183 }
4184 }
4185
4186}
4187
4188static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
4189 uint32_t clock)
4190{
4191 if (clock >= MEM_FREQ_LOW_LATENCY &&
4192 clock < MEM_FREQ_HIGH_LATENCY)
4193 return MEM_LATENCY_HIGH;
4194 else if (clock >= MEM_FREQ_HIGH_LATENCY)
4195 return MEM_LATENCY_LOW;
4196 else
4197 return MEM_LATENCY_ERR;
4198}
4199
4200static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4201 struct pp_clock_levels_with_latency *clocks)
4202{
4203 struct phm_ppt_v2_information *table_info =
4204 (struct phm_ppt_v2_information *)hwmgr->pptable;
4205 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4206 table_info->vdd_dep_on_mclk;
690dc626 4207 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4208 uint32_t i;
4209
4210 clocks->num_levels = 0;
4211 data->mclk_latency_table.count = 0;
4212
4213 for (i = 0; i < dep_table->count; i++) {
4214 if (dep_table->entries[i].clk) {
4215 clocks->data[clocks->num_levels].clocks_in_khz =
4216 data->mclk_latency_table.entries
4217 [data->mclk_latency_table.count].frequency =
4218 dep_table->entries[i].clk;
4219 clocks->data[clocks->num_levels].latency_in_us =
4220 data->mclk_latency_table.entries
4221 [data->mclk_latency_table.count].latency =
4222 vega10_get_mem_latency(hwmgr,
4223 dep_table->entries[i].clk);
4224 clocks->num_levels++;
4225 data->mclk_latency_table.count++;
4226 }
4227 }
4228}
4229
4230static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4231 struct pp_clock_levels_with_latency *clocks)
4232{
4233 struct phm_ppt_v2_information *table_info =
4234 (struct phm_ppt_v2_information *)hwmgr->pptable;
4235 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4236 table_info->vdd_dep_on_dcefclk;
4237 uint32_t i;
4238
4239 for (i = 0; i < dep_table->count; i++) {
4240 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4241 clocks->data[i].latency_in_us = 0;
4242 clocks->num_levels++;
4243 }
4244}
4245
4246static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4247 struct pp_clock_levels_with_latency *clocks)
4248{
4249 struct phm_ppt_v2_information *table_info =
4250 (struct phm_ppt_v2_information *)hwmgr->pptable;
4251 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4252 table_info->vdd_dep_on_socclk;
4253 uint32_t i;
4254
4255 for (i = 0; i < dep_table->count; i++) {
4256 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4257 clocks->data[i].latency_in_us = 0;
4258 clocks->num_levels++;
4259 }
4260}
4261
4262static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4263 enum amd_pp_clock_type type,
4264 struct pp_clock_levels_with_latency *clocks)
4265{
4266 switch (type) {
4267 case amd_pp_sys_clock:
4268 vega10_get_sclks(hwmgr, clocks);
4269 break;
4270 case amd_pp_mem_clock:
4271 vega10_get_memclocks(hwmgr, clocks);
4272 break;
4273 case amd_pp_dcef_clock:
4274 vega10_get_dcefclocks(hwmgr, clocks);
4275 break;
4276 case amd_pp_soc_clock:
4277 vega10_get_socclocks(hwmgr, clocks);
4278 break;
4279 default:
4280 return -1;
4281 }
4282
4283 return 0;
4284}
4285
4286static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4287 enum amd_pp_clock_type type,
4288 struct pp_clock_levels_with_voltage *clocks)
4289{
4290 struct phm_ppt_v2_information *table_info =
4291 (struct phm_ppt_v2_information *)hwmgr->pptable;
4292 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4293 uint32_t i;
4294
4295 switch (type) {
4296 case amd_pp_mem_clock:
4297 dep_table = table_info->vdd_dep_on_mclk;
4298 break;
4299 case amd_pp_dcef_clock:
4300 dep_table = table_info->vdd_dep_on_dcefclk;
4301 break;
4302 case amd_pp_disp_clock:
4303 dep_table = table_info->vdd_dep_on_dispclk;
4304 break;
4305 case amd_pp_pixel_clock:
4306 dep_table = table_info->vdd_dep_on_pixclk;
4307 break;
4308 case amd_pp_phy_clock:
4309 dep_table = table_info->vdd_dep_on_phyclk;
4310 break;
4311 default:
4312 return -1;
4313 }
4314
4315 for (i = 0; i < dep_table->count; i++) {
4316 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4317 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4318 entries[dep_table->entries[i].vddInd].us_vdd);
4319 clocks->num_levels++;
4320 }
4321
4322 if (i < dep_table->count)
4323 return -1;
4324
4325 return 0;
4326}
4327
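Each dependency entry above carries only an index (vddInd) into the shared vddc lookup table, so clock and voltage are joined at query time. A minimal sketch of that join with hypothetical types (struct dep_entry and entry_voltage_mv() are illustrative, not the driver's phm_* structures):

#include <stdint.h>

struct dep_entry {
	uint32_t clk;		/* clock, kHz */
	uint8_t vddInd;		/* index into the vddc lookup table */
};

/* Illustrative sketch: resolve an entry's voltage in millivolts. */
static uint16_t entry_voltage_mv(const struct dep_entry *e,
				 const uint16_t *vddc_lookup_mv)
{
	return vddc_lookup_mv[e->vddInd];
}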
4328static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4329 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
4330{
690dc626 4331 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4332 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4333 int result = 0;
4334 uint32_t i;
4335
4336 if (!data->registry_data.disable_water_mark) {
4337 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
4338 table->WatermarkRow[WM_DCEFCLK][i].MinClock =
4339 cpu_to_le16((uint16_t)
4340 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
4341 100);
4342 table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
4343 cpu_to_le16((uint16_t)
4344 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
4345 100);
4346 table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
4347 cpu_to_le16((uint16_t)
4348 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
4349 100);
4350 table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
4351 cpu_to_le16((uint16_t)
4352 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
4353 100);
4354 table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
4355 wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
4356 }
4357
4358 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
4359 table->WatermarkRow[WM_SOCCLK][i].MinClock =
4360 cpu_to_le16((uint16_t)
4361 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
4362 100);
4363 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
4364 cpu_to_le16((uint16_t)
4365 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
4366 100);
4367 table->WatermarkRow[WM_SOCCLK][i].MinUclk =
4368 cpu_to_le16((uint16_t)
4369 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
4370 100);
4371 table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
4372 cpu_to_le16((uint16_t)
4373 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
4374 100);
4375 table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
4376 wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
4377 }
4378 data->water_marks_bitmap = WaterMarksExist;
4379 }
4380
4381 return result;
4382}
4383
4384static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4385 enum pp_clock_type type, uint32_t mask)
4386{
690dc626 4387 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 4388
f83a9991
EH
4389 switch (type) {
4390 case PP_SCLK:
10cd19c8
EQ
4391 data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
4392 data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
7b52db39
RZ
4393
4394 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4395 "Failed to upload boot level to lowest!",
4396 return -EINVAL);
4397
4398 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4399 "Failed to upload dpm max level to highest!",
4400 return -EINVAL);
f83a9991
EH
4401 break;
4402
4403 case PP_MCLK:
10cd19c8
EQ
4404 data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
4405 data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
7b52db39
RZ
4406
4407 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4408 "Failed to upload boot level to lowest!",
4409 return -EINVAL);
4410
4411 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4412 "Failed to upload dpm max level to highest!",
4413 return -EINVAL);
f83a9991 4414
f83a9991 4415 break;
7b52db39
RZ
4416
4417 case PP_PCIE:
f83a9991
EH
4418 default:
4419 break;
4420 }
4421
4422 return 0;
4423}
4424
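The level mask passed to vega10_force_clock_level() is collapsed into a [boot, max] pair: the lowest set bit becomes the soft-min level and the highest set bit the soft-max level, with an empty mask falling back to level 0. A user-space sketch using GCC builtins in place of the kernel's ffs()/fls() (mask_to_levels() is a hypothetical name):

#include <stdint.h>

static void mask_to_levels(uint32_t mask, uint32_t *boot_level, uint32_t *max_level)
{
	*boot_level = mask ? (uint32_t)__builtin_ctz(mask) : 0;		/* ffs(mask) - 1 */
	*max_level = mask ? 31u - (uint32_t)__builtin_clz(mask) : 0;	/* fls(mask) - 1 */
}

A mask of 0x6, for example, yields boot level 1 and max level 2.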
4425static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4426 enum pp_clock_type type, char *buf)
4427{
690dc626 4428 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4429 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4430 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4431 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4432 int i, now, size = 0;
4433
4434 switch (type) {
4435 case PP_SCLK:
4436 if (data->registry_data.sclk_dpm_key_disabled)
4437 break;
4438
d246cd53 4439 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
3f9ca14a 4440 now = smum_get_argument(hwmgr);
f83a9991
EH
4441
4442 for (i = 0; i < sclk_table->count; i++)
4443 size += sprintf(buf + size, "%d: %uMhz %s\n",
4444 i, sclk_table->dpm_levels[i].value / 100,
4445 (i == now) ? "*" : "");
4446 break;
4447 case PP_MCLK:
4448 if (data->registry_data.mclk_dpm_key_disabled)
4449 break;
4450
d246cd53 4451 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
3f9ca14a 4452 now = smum_get_argument(hwmgr);
f83a9991
EH
4453
4454 for (i = 0; i < mclk_table->count; i++)
4455 size += sprintf(buf + size, "%d: %uMhz %s\n",
4456 i, mclk_table->dpm_levels[i].value / 100,
4457 (i == now) ? "*" : "");
4458 break;
4459 case PP_PCIE:
d246cd53 4460 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
3f9ca14a 4461 now = smum_get_argument(hwmgr);
f83a9991
EH
4462
4463 for (i = 0; i < pcie_table->count; i++)
4464 size += sprintf(buf + size, "%d: %s %s\n", i,
7413d2fa
EQ
4465 (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
4466 (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
4467 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
f83a9991
EH
4468 (i == now) ? "*" : "");
4469 break;
4470 default:
4471 break;
4472 }
4473 return size;
4474}
4475
4476static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4477{
690dc626 4478 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 4479 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
555fd70c 4480 int result = 0;
f83a9991
EH
4481
4482 if ((data->water_marks_bitmap & WaterMarksExist) &&
4483 !(data->water_marks_bitmap & WaterMarksLoaded)) {
3f9ca14a 4484 result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
f83a9991
EH
4485 		PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
4486 data->water_marks_bitmap |= WaterMarksLoaded;
4487 }
4488
4489 if (data->water_marks_bitmap & WaterMarksLoaded) {
d3f8c0ab 4490 smum_send_msg_to_smc_with_parameter(hwmgr,
555fd70c 4491 PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
f83a9991
EH
4492 }
4493
4494 return result;
4495}
4496
4497int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4498{
690dc626 4499 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4500
4501 if (data->smu_features[GNLD_DPM_UVD].supported) {
d3f8c0ab 4502 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
f83a9991
EH
4503 enable,
4504 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4505 "Attempt to Enable/Disable DPM UVD Failed!",
4506 return -1);
4507 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4508 }
4509 return 0;
4510}
4511
f93f0c3a 4512static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
f83a9991 4513{
690dc626 4514 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4515
4516 data->vce_power_gated = bgate;
f93f0c3a 4517 vega10_enable_disable_vce_dpm(hwmgr, !bgate);
f83a9991
EH
4518}
4519
f93f0c3a 4520static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
f83a9991 4521{
690dc626 4522 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991
EH
4523
4524 data->uvd_power_gated = bgate;
f93f0c3a 4525 vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
f83a9991
EH
4526}
4527
4528static inline bool vega10_are_power_levels_equal(
4529 const struct vega10_performance_level *pl1,
4530 const struct vega10_performance_level *pl2)
4531{
4532 return ((pl1->soc_clock == pl2->soc_clock) &&
4533 (pl1->gfx_clock == pl2->gfx_clock) &&
4534 (pl1->mem_clock == pl2->mem_clock));
4535}
4536
4537static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4538 const struct pp_hw_power_state *pstate1,
4539 const struct pp_hw_power_state *pstate2, bool *equal)
4540{
4541 const struct vega10_power_state *psa;
4542 const struct vega10_power_state *psb;
4543 int i;
4544
4545 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4546 return -EINVAL;
4547
4548 psa = cast_const_phw_vega10_power_state(pstate1);
4549 psb = cast_const_phw_vega10_power_state(pstate2);
4550 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4551 if (psa->performance_level_count != psb->performance_level_count) {
4552 *equal = false;
4553 return 0;
4554 }
4555
4556 for (i = 0; i < psa->performance_level_count; i++) {
4557 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4558 /* If we have found even one performance level pair that is different the states are different. */
4559 *equal = false;
4560 return 0;
4561 }
4562 }
4563
4564 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4565 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4566 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4567 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4568
4569 return 0;
4570}
4571
4572static bool
4573vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4574{
690dc626 4575 struct vega10_hwmgr *data = hwmgr->backend;
f83a9991 4576 bool is_update_required = false;
f83a9991 4577
555fd70c 4578 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
f83a9991
EH
4579 is_update_required = true;
4580
dd5a6fe2 4581 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
555fd70c 4582 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
f83a9991
EH
4583 is_update_required = true;
4584 }
4585
4586 return is_update_required;
4587}
4588
8b9242ed
RZ
4589static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4590{
4591 int tmp_result, result = 0;
4592
dd5a6fe2 4593 if (PP_CAP(PHM_PlatformCaps_ThermalController))
8b9242ed
RZ
4594 vega10_disable_thermal_protection(hwmgr);
4595
4596 tmp_result = vega10_disable_power_containment(hwmgr);
4597 PP_ASSERT_WITH_CODE((tmp_result == 0),
4598 "Failed to disable power containment!", result = tmp_result);
4599
9b7b8154
EQ
4600 tmp_result = vega10_disable_didt_config(hwmgr);
4601 PP_ASSERT_WITH_CODE((tmp_result == 0),
4602 "Failed to disable didt config!", result = tmp_result);
4603
8b9242ed
RZ
4604 tmp_result = vega10_avfs_enable(hwmgr, false);
4605 PP_ASSERT_WITH_CODE((tmp_result == 0),
4606 "Failed to disable AVFS!", result = tmp_result);
4607
4608 tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4609 PP_ASSERT_WITH_CODE((tmp_result == 0),
4610 "Failed to stop DPM!", result = tmp_result);
4611
df057e02
RZ
4612 tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
4613 PP_ASSERT_WITH_CODE((tmp_result == 0),
4614 "Failed to disable deep sleep!", result = tmp_result);
4615
4022e4f2
RZ
4616 tmp_result = vega10_disable_ulv(hwmgr);
4617 PP_ASSERT_WITH_CODE((tmp_result == 0),
4618 "Failed to disable ulv!", result = tmp_result);
4619
bdb8cd10
RZ
4620 tmp_result = vega10_acg_disable(hwmgr);
4621 PP_ASSERT_WITH_CODE((tmp_result == 0),
4622 "Failed to disable acg!", result = tmp_result);
15826fbf
RZ
4623
4624 vega10_enable_disable_PCC_limit_feature(hwmgr, false);
8b9242ed
RZ
4625 return result;
4626}
4627
4628static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4629{
690dc626 4630 struct vega10_hwmgr *data = hwmgr->backend;
8b9242ed
RZ
4631 int result;
4632
4633 result = vega10_disable_dpm_tasks(hwmgr);
4634 PP_ASSERT_WITH_CODE((0 == result),
4635 "[disable_dpm_tasks] Failed to disable DPM!",
4636 );
4637 data->water_marks_bitmap &= ~(WaterMarksLoaded);
4638
4639 return result;
4640}
4641
dd4e2237
EH
4642static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4643{
690dc626 4644 struct vega10_hwmgr *data = hwmgr->backend;
dd4e2237
EH
4645 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4646 struct vega10_single_dpm_table *golden_sclk_table =
4647 &(data->golden_dpm_table.gfx_table);
4648 int value;
4649
4650 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4651 golden_sclk_table->dpm_levels
4652 [golden_sclk_table->count - 1].value) *
4653 100 /
4654 golden_sclk_table->dpm_levels
4655 [golden_sclk_table->count - 1].value;
4656
4657 return value;
4658}
4659
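The value returned above is simply the percentage by which the current top gfx level exceeds the golden (default) top level; vega10_set_sclk_od() below applies the inverse scaling. A standalone sketch of both directions (od_get_percent() and od_apply_percent() are hypothetical names; the math assumes the current value is at or above the golden one, since overdrive only raises clocks):

#include <stdint.h>

static uint32_t od_get_percent(uint32_t current_top, uint32_t golden_top)
{
	return (current_top - golden_top) * 100 / golden_top;
}

static uint32_t od_apply_percent(uint32_t golden_top, uint32_t percent)
{
	return golden_top + golden_top * percent / 100;
}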
static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	struct pp_power_state *ps;
	struct vega10_power_state *vega10_ps;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);

	vega10_ps->performance_levels
	[vega10_ps->performance_level_count - 1].gfx_clock =
			golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value;

	if (vega10_ps->performance_levels
		[vega10_ps->performance_level_count - 1].gfx_clock >
			hwmgr->platform_descriptor.overdriveLimit.engineClock)
		vega10_ps->performance_levels
		[vega10_ps->performance_level_count - 1].gfx_clock =
				hwmgr->platform_descriptor.overdriveLimit.engineClock;

	return 0;
}

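/*
 * Worked example for vega10_set_sclk_od() (illustrative): value = 6 raises
 * the highest performance level to golden * 6 / 100 + golden, i.e. a 6%
 * overclock on top of the golden (default) top level.  The result is then
 * clamped to overdriveLimit.engineClock from the powerplay table, so a
 * request beyond what the board advertises is silently capped.
 */
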
static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
	struct vega10_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	int value;

	value = (mclk_table->dpm_levels
			[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	return value;
}

static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	struct pp_power_state *ps;
	struct vega10_power_state *vega10_ps;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);

	vega10_ps->performance_levels
	[vega10_ps->performance_level_count - 1].mem_clock =
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	if (vega10_ps->performance_levels
		[vega10_ps->performance_level_count - 1].mem_clock >
			hwmgr->platform_descriptor.overdriveLimit.memoryClock)
		vega10_ps->performance_levels
		[vega10_ps->performance_level_count - 1].mem_clock =
				hwmgr->platform_descriptor.overdriveLimit.memoryClock;

	return 0;
}

static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
					virtual_addr_hi);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrLow,
					virtual_addr_low);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrHigh,
					mc_addr_hi);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrLow,
					mc_addr_low);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramSize,
					size);
	return 0;
}

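/*
 * Editorial note: vega10_notify_cac_buffer_info() describes the CAC/DRAM-log
 * buffer to the SMU as two 64-bit addresses (system virtual and MC) split
 * into high/low 32-bit halves, plus the buffer size, i.e. five PPSMC messages
 * in total.  A caller holding a 64-bit address would typically pass
 * upper_32_bits(addr) / lower_32_bits(addr) for the hi/lo parameters
 * (illustrative, not a claim about any particular call site).
 */
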
static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;

	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));

	thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

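/*
 * Editorial note: the range starts from the SMU7ThermalWithDelayPolicy
 * defaults and only the maximum is overridden from the powerplay table's
 * usSoftwareShutdownTemp.  PP_TEMPERATURE_UNITS_PER_CENTIGRADES scales the
 * table value (degrees C) into the finer-grained units used by the thermal
 * framework; assuming the usual factor of 1000, a 90 C shutdown limit would
 * be reported as 90000 millidegrees.
 */
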
static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t i, size = 0;
	static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,},
						{90, 60, 0, 0,},
						{70, 60, 0, 0,},
						{70, 90, 0, 0,},
						{30, 60, 0, 6,},
						};
	static const char *profile_name[6] = {"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};
	static const char *title[6] = {"NUM",
			"MODE_NAME",
			"BUSY_SET_POINT",
			"FPS",
			"USE_RLC_BUSY",
			"MIN_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %s %s %s %s\n", title[0],
			title[1], title[2], title[3], title[4], title[5]);

	for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
		size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
			i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
			profile_mode_setting[i][0], profile_mode_setting[i][1],
			profile_mode_setting[i][2], profile_mode_setting[i][3]);
	size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
			profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
			data->custom_profile_mode[0], data->custom_profile_mode[1],
			data->custom_profile_mode[2], data->custom_profile_mode[3]);
	return size;
}

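/*
 * Illustrative pp_power_profile_mode output from the function above (column
 * spacing approximate; the '*' marks the currently selected mode and the
 * CUSTOM row shows whatever was last programmed):
 *
 *   NUM        MODE_NAME BUSY_SET_POINT FPS USE_RLC_BUSY MIN_ACTIVE_LEVEL
 *     0 3D_FULL_SCREEN*:             70  60            1                3
 *     1   POWER_SAVING :             90  60            0                0
 *     ...
 *     5         CUSTOM :              0   0            0                0
 */
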
static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint8_t busy_set_point;
	uint8_t FPS;
	uint8_t use_rlc_busy;
	uint8_t min_active_level;

	hwmgr->power_profile_mode = input[size];

	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
						1 << hwmgr->power_profile_mode);

	if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (size == 0 || size > 4)
			return -EINVAL;

		data->custom_profile_mode[0] = busy_set_point = input[0];
		data->custom_profile_mode[1] = FPS = input[1];
		data->custom_profile_mode[2] = use_rlc_busy = input[2];
		data->custom_profile_mode[3] = min_active_level = input[3];
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetCustomGfxDpmParameters,
				busy_set_point | FPS << 8 |
				use_rlc_busy << 16 | min_active_level << 24);
	}

	return 0;
}

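/*
 * Worked example for vega10_set_power_profile_mode() (illustrative): input[]
 * carrying busy_set_point=70, FPS=60, use_rlc_busy=1, min_active_level=3
 * (with the requested mode index at input[size]) is packed for
 * PPSMC_MSG_SetCustomGfxDpmParameters as
 *   70 | 60 << 8 | 1 << 16 | 3 << 24 = 0x03013C46,
 * one byte per field, least significant byte first.
 */
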
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
	.backend_init = vega10_hwmgr_backend_init,
	.backend_fini = vega10_hwmgr_backend_fini,
	.asic_setup = vega10_setup_asic_task,
	.dynamic_state_management_enable = vega10_enable_dpm_tasks,
	.dynamic_state_management_disable = vega10_disable_dpm_tasks,
	.get_num_of_pp_table_entries =
			vega10_get_number_of_powerplay_table_entries,
	.get_power_state_size = vega10_get_power_state_size,
	.get_pp_table_entry = vega10_get_pp_table_entry,
	.patch_boot_state = vega10_patch_boot_state,
	.apply_state_adjust_rules = vega10_apply_state_adjust_rules,
	.power_state_set = vega10_set_power_state_tasks,
	.get_sclk = vega10_dpm_get_sclk,
	.get_mclk = vega10_dpm_get_mclk,
	.notify_smc_display_config_after_ps_adjustment =
			vega10_notify_smc_display_config_after_ps_adjustment,
	.force_dpm_level = vega10_dpm_force_dpm_level,
	.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
	.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default =
			vega10_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller =
			vega10_thermal_ctrl_uninitialize_thermal_controller,
	.set_fan_control_mode = vega10_set_fan_control_mode,
	.get_fan_control_mode = vega10_get_fan_control_mode,
	.read_sensor = vega10_read_sensor,
	.get_dal_power_level = vega10_get_dal_power_level,
	.get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega10_display_clock_voltage_request,
	.force_clock_level = vega10_force_clock_level,
	.print_clock_levels = vega10_print_clock_levels,
	.display_config_changed = vega10_display_configuration_changed_task,
	.powergate_uvd = vega10_power_gate_uvd,
	.powergate_vce = vega10_power_gate_vce,
	.check_states_equal = vega10_check_states_equal,
	.check_smc_update_required_for_display_configuration =
			vega10_check_smc_update_required_for_display_configuration,
	.power_off_asic = vega10_power_off_asic,
	.disable_smc_firmware_ctf = vega10_thermal_disable_alert,
	.get_sclk_od = vega10_get_sclk_od,
	.set_sclk_od = vega10_set_sclk_od,
	.get_mclk_od = vega10_get_mclk_od,
	.set_mclk_od = vega10_set_mclk_od,
	.avfs_control = vega10_avfs_enable,
	.notify_cac_buffer_info = vega10_notify_cac_buffer_info,
	.get_thermal_temperature_range = vega10_get_thermal_temperature_range,
	.register_irq_handlers = smu9_register_irq_handlers,
	.start_thermal_controller = vega10_start_thermal_controller,
	.get_power_profile_mode = vega10_get_power_profile_mode,
	.set_power_profile_mode = vega10_set_power_profile_mode,
	.set_power_limit = vega10_set_power_limit,
};

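/*
 * Editorial note: vega10_hwmgr_funcs above is the Vega10 implementation of
 * the generic pp_hwmgr_func interface; the powerplay core only calls these
 * entry points through hwmgr->hwmgr_func, which vega10_hwmgr_init() below
 * points at this table.
 */
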
int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
		bool enable, uint32_t feature_mask)
{
	int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
			PPSMC_MSG_DisableSmuFeatures;

	return smum_send_msg_to_smc_with_parameter(hwmgr,
			msg, feature_mask);
}

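/*
 * Usage note for vega10_enable_smc_features() (editorial): enable selects
 * between PPSMC_MSG_EnableSmuFeatures and PPSMC_MSG_DisableSmuFeatures, and
 * feature_mask is the SMU feature bitmap to toggle, e.g. (illustrative)
 * vega10_enable_smc_features(hwmgr, false, feature_bitmap) during teardown,
 * where feature_bitmap stands for whichever per-feature mask the caller owns.
 */
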
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
	hwmgr->pptable_func = &vega10_pptable_funcs;

	return 0;
}