drm/amd/powerplay: drop unneeded newline
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
 23#include "pp_debug.h"
 24#include <linux/delay.h>
25#include <linux/fb.h>
26#include <linux/module.h>
27#include <linux/slab.h>
 28#include <asm/div64.h>
 29#include <drm/amdgpu_drm.h>
 30#include "pp_acpi.h"
31#include "ppatomctrl.h"
32#include "atombios.h"
33#include "pptable_v1_0.h"
34#include "pppcielanes.h"
35#include "amd_pcie_helpers.h"
36#include "hardwaremanager.h"
37#include "process_pptables_v1_0.h"
38#include "cgs_common.h"
39
40#include "smu7_common.h"
41
42#include "hwmgr.h"
43#include "smu7_hwmgr.h"
44#include "smu7_smumgr.h"
45#include "smu_ucode_xfer_vi.h"
46#include "smu7_powertune.h"
47#include "smu7_dyn_defaults.h"
48#include "smu7_thermal.h"
49#include "smu7_clockpowergating.h"
50#include "processpptables.h"
51
52#define MC_CG_ARB_FREQ_F0 0x0a
53#define MC_CG_ARB_FREQ_F1 0x0b
54#define MC_CG_ARB_FREQ_F2 0x0c
55#define MC_CG_ARB_FREQ_F3 0x0d
56
57#define MC_CG_SEQ_DRAMCONF_S0 0x05
58#define MC_CG_SEQ_DRAMCONF_S1 0x06
59#define MC_CG_SEQ_YCLK_SUSPEND 0x04
60#define MC_CG_SEQ_YCLK_RESUME 0x0a
61
62#define SMC_CG_IND_START 0xc0030000
63#define SMC_CG_IND_END 0xc0040000
64
65#define VOLTAGE_SCALE 4
66#define VOLTAGE_VID_OFFSET_SCALE1 625
67#define VOLTAGE_VID_OFFSET_SCALE2 100
68
69#define MEM_FREQ_LOW_LATENCY 25000
70#define MEM_FREQ_HIGH_LATENCY 80000
71
72#define MEM_LATENCY_HIGH 45
73#define MEM_LATENCY_LOW 35
74#define MEM_LATENCY_ERR 0xFFFF
75
76#define MC_SEQ_MISC0_GDDR5_SHIFT 28
77#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
78#define MC_SEQ_MISC0_GDDR5_VALUE 5
79
80#define PCIE_BUS_CLK 10000
81#define TCLK (PCIE_BUS_CLK / 10)
82
83
84/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
85enum DPM_EVENT_SRC {
86 DPM_EVENT_SRC_ANALOG = 0,
87 DPM_EVENT_SRC_EXTERNAL = 1,
88 DPM_EVENT_SRC_DIGITAL = 2,
89 DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
90 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
91};
92
 93static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable);
 94static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
95static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
96 enum pp_clock_type type, uint32_t mask);
 97
 98static struct smu7_power_state *cast_phw_smu7_power_state(
99 struct pp_hw_power_state *hw_ps)
100{
101 PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
102 "Invalid Powerstate Type!",
103 return NULL);
104
105 return (struct smu7_power_state *)hw_ps;
106}
107
 108static const struct smu7_power_state *cast_const_phw_smu7_power_state(
109 const struct pp_hw_power_state *hw_ps)
110{
111 PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
112 "Invalid Powerstate Type!",
113 return NULL);
114
115 return (const struct smu7_power_state *)hw_ps;
116}
117
118/**
119 * Find the MC microcode version and store it in the HwMgr struct
120 *
121 * @param hwmgr the address of the powerplay hardware manager.
122 * @return always 0
123 */
 124static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
125{
126 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
127
128 hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
129
130 return 0;
131}
132
 133static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
134{
135 uint32_t speedCntl = 0;
136
137 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
138 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
139 ixPCIE_LC_SPEED_CNTL);
140 return((uint16_t)PHM_GET_FIELD(speedCntl,
141 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
142}
143
 144static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
145{
146 uint32_t link_width;
147
148 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
149 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
150 PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
151
152 PP_ASSERT_WITH_CODE((7 >= link_width),
153 "Invalid PCIe lane width!", return 0);
154
155 return decode_pcie_lane_width(link_width);
156}
157
158/**
159* Enable voltage control
160*
 161* @param hwmgr the address of the powerplay hardware manager.
 162* @return always 0
163*/
 164static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
165{
166 if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
 167 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
168
169 return 0;
170}
171
172/**
173* Checks if we want to support voltage control
174*
175* @param hwmgr the address of the powerplay hardware manager.
176*/
177static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
178{
179 const struct smu7_hwmgr *data =
180 (const struct smu7_hwmgr *)(hwmgr->backend);
181
182 return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
183}
184
185/**
186* Enable voltage control
187*
188* @param hwmgr the address of the powerplay hardware manager.
189* @return always 0
190*/
191static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
192{
193 /* enable voltage control */
194 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
195 GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
196
197 return 0;
198}
199
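/* Helper for v0 power-play tables: build an SMIO-less voltage table straight from a clock/voltage dependency table (each entry keeps the dependency voltage, smio_low is cleared). */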
200static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
201 struct phm_clock_voltage_dependency_table *voltage_dependency_table
202 )
203{
204 uint32_t i;
205
206 PP_ASSERT_WITH_CODE((NULL != voltage_table),
207 "Voltage Dependency Table empty.", return -EINVAL;);
208
209 voltage_table->mask_low = 0;
210 voltage_table->phase_delay = 0;
211 voltage_table->count = voltage_dependency_table->count;
212
213 for (i = 0; i < voltage_dependency_table->count; i++) {
214 voltage_table->entries[i].value =
215 voltage_dependency_table->entries[i].v;
216 voltage_table->entries[i].smio_low = 0;
217 }
218
219 return 0;
220}
221
222
223/**
224* Create Voltage Tables.
225*
226* @param hwmgr the address of the powerplay hardware manager.
227* @return always 0
228*/
229static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
230{
231 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
232 struct phm_ppt_v1_information *table_info =
233 (struct phm_ppt_v1_information *)hwmgr->pptable;
234 int result = 0;
235 uint32_t tmp;
236
237 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
238 result = atomctrl_get_voltage_table_v3(hwmgr,
239 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
240 &(data->mvdd_voltage_table));
241 PP_ASSERT_WITH_CODE((0 == result),
242 "Failed to retrieve MVDD table.",
243 return result);
244 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
245 if (hwmgr->pp_table_version == PP_TABLE_V1)
246 result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
247 table_info->vdd_dep_on_mclk);
248 else if (hwmgr->pp_table_version == PP_TABLE_V0)
249 result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
250 hwmgr->dyn_state.mvdd_dependency_on_mclk);
251
252 PP_ASSERT_WITH_CODE((0 == result),
253 "Failed to retrieve SVI2 MVDD table from dependancy table.",
254 return result;);
255 }
256
257 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
258 result = atomctrl_get_voltage_table_v3(hwmgr,
259 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
260 &(data->vddci_voltage_table));
261 PP_ASSERT_WITH_CODE((0 == result),
262 "Failed to retrieve VDDCI table.",
263 return result);
264 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
265 if (hwmgr->pp_table_version == PP_TABLE_V1)
266 result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
267 table_info->vdd_dep_on_mclk);
268 else if (hwmgr->pp_table_version == PP_TABLE_V0)
269 result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
270 hwmgr->dyn_state.vddci_dependency_on_mclk);
271 PP_ASSERT_WITH_CODE((0 == result),
272 "Failed to retrieve SVI2 VDDCI table from dependancy table.",
273 return result);
274 }
275
276 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
277 /* VDDGFX has only SVI2 voltage control */
278 result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
279 table_info->vddgfx_lookup_table);
280 PP_ASSERT_WITH_CODE((0 == result),
281 "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
282 }
283
284
285 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
286 result = atomctrl_get_voltage_table_v3(hwmgr,
287 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
288 &data->vddc_voltage_table);
289 PP_ASSERT_WITH_CODE((0 == result),
290 "Failed to retrieve VDDC table.", return result;);
291 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
292
293 if (hwmgr->pp_table_version == PP_TABLE_V0)
294 result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
295 hwmgr->dyn_state.vddc_dependency_on_mclk);
296 else if (hwmgr->pp_table_version == PP_TABLE_V1)
297 result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
298 table_info->vddc_lookup_table);
299
300 PP_ASSERT_WITH_CODE((0 == result),
301 "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
302 }
303
 304 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
305 PP_ASSERT_WITH_CODE(
306 (data->vddc_voltage_table.count <= tmp),
307 "Too many voltage values for VDDC. Trimming to fit state table.",
308 phm_trim_voltage_table_to_fit_state_table(tmp,
309 &(data->vddc_voltage_table)));
310
 311 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
312 PP_ASSERT_WITH_CODE(
313 (data->vddgfx_voltage_table.count <= tmp),
314 "Too many voltage values for VDDC. Trimming to fit state table.",
315 phm_trim_voltage_table_to_fit_state_table(tmp,
316 &(data->vddgfx_voltage_table)));
317
 318 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
319 PP_ASSERT_WITH_CODE(
320 (data->vddci_voltage_table.count <= tmp),
321 "Too many voltage values for VDDCI. Trimming to fit state table.",
322 phm_trim_voltage_table_to_fit_state_table(tmp,
323 &(data->vddci_voltage_table)));
324
 325 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
326 PP_ASSERT_WITH_CODE(
327 (data->mvdd_voltage_table.count <= tmp),
328 "Too many voltage values for MVDD. Trimming to fit state table.",
329 phm_trim_voltage_table_to_fit_state_table(tmp,
330 &(data->mvdd_voltage_table)));
331
332 return 0;
333}
334
335/**
 336* Programs static screen detection parameters
337*
338* @param hwmgr the address of the powerplay hardware manager.
339* @return always 0
340*/
341static int smu7_program_static_screen_threshold_parameters(
342 struct pp_hwmgr *hwmgr)
343{
344 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
345
346 /* Set static screen threshold unit */
347 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
348 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
349 data->static_screen_threshold_unit);
350 /* Set static screen threshold */
351 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
352 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
353 data->static_screen_threshold);
354
355 return 0;
356}
357
358/**
359* Setup display gap for glitch free memory clock switching.
360*
361* @param hwmgr the address of the powerplay hardware manager.
362* @return always 0
363*/
364static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
365{
366 uint32_t display_gap =
367 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
368 ixCG_DISPLAY_GAP_CNTL);
369
370 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
371 DISP_GAP, DISPLAY_GAP_IGNORE);
372
373 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
374 DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
375
376 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
377 ixCG_DISPLAY_GAP_CNTL, display_gap);
378
379 return 0;
380}
381
382/**
383* Programs activity state transition voting clients
384*
385* @param hwmgr the address of the powerplay hardware manager.
386* @return always 0
387*/
388static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
389{
390 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 391 int i;
392
393 /* Clear reset for voting clients before enabling DPM */
394 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
395 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
396 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
397 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
398
399 for (i = 0; i < 8; i++)
400 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
401 ixCG_FREQ_TRAN_VOTING_0 + i * 4,
402 data->voting_rights_clients[i]);
403 return 0;
404}
405
406static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
407{
408 int i;
409
410 /* Reset voting clients before disabling DPM */
411 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
412 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
413 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
414 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
415
416 for (i = 0; i < 8; i++)
417 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
418 ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
419
420 return 0;
421}
422
423/* Copy one arb setting to another and then switch the active set.
 424 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
425 */
426static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
427 uint32_t arb_src, uint32_t arb_dest)
428{
429 uint32_t mc_arb_dram_timing;
430 uint32_t mc_arb_dram_timing2;
431 uint32_t burst_time;
432 uint32_t mc_cg_config;
433
434 switch (arb_src) {
435 case MC_CG_ARB_FREQ_F0:
436 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
437 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
438 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
439 break;
440 case MC_CG_ARB_FREQ_F1:
441 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
442 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
443 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
444 break;
445 default:
446 return -EINVAL;
447 }
448
449 switch (arb_dest) {
450 case MC_CG_ARB_FREQ_F0:
451 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
452 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
453 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
454 break;
455 case MC_CG_ARB_FREQ_F1:
456 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
457 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
458 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
459 break;
460 default:
461 return -EINVAL;
462 }
463
464 mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
465 mc_cg_config |= 0x0000000F;
466 cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
467 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
468
469 return 0;
470}
471
472static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
473{
 474 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
475}
476
477/**
478* Initial switch from ARB F0->F1
479*
480* @param hwmgr the address of the powerplay hardware manager.
481* @return always 0
482* This function is to be called from the SetPowerState table.
483*/
484static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
485{
486 return smu7_copy_and_switch_arb_sets(hwmgr,
487 MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
488}
489
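/* Read the currently active ARB set from SMC scratch register 9 and, if it is not already F0, copy that set to F0 and switch to it. */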
490static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
491{
492 uint32_t tmp;
493
494 tmp = (cgs_read_ind_register(hwmgr->device,
495 CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
496 0x0000ff00) >> 8;
497
498 if (tmp == MC_CG_ARB_FREQ_F0)
499 return 0;
500
501 return smu7_copy_and_switch_arb_sets(hwmgr,
502 tmp, MC_CG_ARB_FREQ_F0);
503}
504
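/* Build the PCIe speed/lane DPM table from the pptable when one is provided, otherwise fall back to a hardcoded six-level table; a boot-level entry is appended without increasing the count. */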
505static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
506{
507 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
508
509 struct phm_ppt_v1_information *table_info =
510 (struct phm_ppt_v1_information *)(hwmgr->pptable);
511 struct phm_ppt_v1_pcie_table *pcie_table = NULL;
512
513 uint32_t i, max_entry;
514 uint32_t tmp;
515
516 PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
517 data->use_pcie_power_saving_levels), "No pcie performance levels!",
518 return -EINVAL);
519
520 if (table_info != NULL)
521 pcie_table = table_info->pcie_table;
522
523 if (data->use_pcie_performance_levels &&
524 !data->use_pcie_power_saving_levels) {
525 data->pcie_gen_power_saving = data->pcie_gen_performance;
526 data->pcie_lane_power_saving = data->pcie_lane_performance;
527 } else if (!data->use_pcie_performance_levels &&
528 data->use_pcie_power_saving_levels) {
529 data->pcie_gen_performance = data->pcie_gen_power_saving;
530 data->pcie_lane_performance = data->pcie_lane_power_saving;
531 }
 532 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
533 phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
534 tmp,
535 MAX_REGULAR_DPM_NUMBER);
536
537 if (pcie_table != NULL) {
538 /* max_entry is used to make sure we reserve one PCIE level
539 * for boot level (fix for A+A PSPP issue).
540 * If PCIE table from PPTable have ULV entry + 8 entries,
541 * then ignore the last entry.*/
542 max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
543 for (i = 1; i < max_entry; i++) {
544 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
545 get_pcie_gen_support(data->pcie_gen_cap,
546 pcie_table->entries[i].gen_speed),
547 get_pcie_lane_support(data->pcie_lane_cap,
548 pcie_table->entries[i].lane_width));
549 }
550 data->dpm_table.pcie_speed_table.count = max_entry - 1;
551 smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
552 } else {
553 /* Hardcode Pcie Table */
554 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
555 get_pcie_gen_support(data->pcie_gen_cap,
556 PP_Min_PCIEGen),
557 get_pcie_lane_support(data->pcie_lane_cap,
558 PP_Max_PCIELane));
559 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
560 get_pcie_gen_support(data->pcie_gen_cap,
561 PP_Min_PCIEGen),
562 get_pcie_lane_support(data->pcie_lane_cap,
563 PP_Max_PCIELane));
564 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
565 get_pcie_gen_support(data->pcie_gen_cap,
566 PP_Max_PCIEGen),
567 get_pcie_lane_support(data->pcie_lane_cap,
568 PP_Max_PCIELane));
569 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
570 get_pcie_gen_support(data->pcie_gen_cap,
571 PP_Max_PCIEGen),
572 get_pcie_lane_support(data->pcie_lane_cap,
573 PP_Max_PCIELane));
574 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
575 get_pcie_gen_support(data->pcie_gen_cap,
576 PP_Max_PCIEGen),
577 get_pcie_lane_support(data->pcie_lane_cap,
578 PP_Max_PCIELane));
579 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
580 get_pcie_gen_support(data->pcie_gen_cap,
581 PP_Max_PCIEGen),
582 get_pcie_lane_support(data->pcie_lane_cap,
583 PP_Max_PCIELane));
584
585 data->dpm_table.pcie_speed_table.count = 6;
586 }
587 /* Populate last level for boot PCIE level, but do not increment count. */
588 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
589 for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
590 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
591 get_pcie_gen_support(data->pcie_gen_cap,
592 PP_Max_PCIEGen),
593 data->vbios_boot_state.pcie_lane_bootup_value);
594 } else {
595 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
596 data->dpm_table.pcie_speed_table.count,
597 get_pcie_gen_support(data->pcie_gen_cap,
598 PP_Min_PCIEGen),
599 get_pcie_lane_support(data->pcie_lane_cap,
600 PP_Max_PCIELane));
 601 }
602 return 0;
603}
604
605static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
606{
607 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
608
609 memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
610
611 phm_reset_single_dpm_table(
612 &data->dpm_table.sclk_table,
 613 smum_get_mac_definition(hwmgr,
614 SMU_MAX_LEVELS_GRAPHICS),
615 MAX_REGULAR_DPM_NUMBER);
616 phm_reset_single_dpm_table(
617 &data->dpm_table.mclk_table,
 618 smum_get_mac_definition(hwmgr,
619 SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
620
621 phm_reset_single_dpm_table(
622 &data->dpm_table.vddc_table,
 623 smum_get_mac_definition(hwmgr,
624 SMU_MAX_LEVELS_VDDC),
625 MAX_REGULAR_DPM_NUMBER);
626 phm_reset_single_dpm_table(
627 &data->dpm_table.vddci_table,
 628 smum_get_mac_definition(hwmgr,
629 SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
630
631 phm_reset_single_dpm_table(
632 &data->dpm_table.mvdd_table,
 633 smum_get_mac_definition(hwmgr,
634 SMU_MAX_LEVELS_MVDD),
635 MAX_REGULAR_DPM_NUMBER);
636 return 0;
637}
638/*
639 * This function is to initialize all DPM state tables
640 * for SMU7 based on the dependency table.
641 * Dynamic state patching function will then trim these
642 * state tables to the allowed range based
643 * on the power policy or external client requests,
644 * such as UVD request, etc.
645 */
646
647static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
648{
649 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
650 struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
651 hwmgr->dyn_state.vddc_dependency_on_sclk;
652 struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
653 hwmgr->dyn_state.vddc_dependency_on_mclk;
654 struct phm_cac_leakage_table *std_voltage_table =
655 hwmgr->dyn_state.cac_leakage_table;
656 uint32_t i;
657
658 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
659 "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
660 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
661 "SCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
662
663 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
664 "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
665 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
666 "VMCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
667
668
669 /* Initialize Sclk DPM table based on allow Sclk values*/
670 data->dpm_table.sclk_table.count = 0;
671
672 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
673 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
674 allowed_vdd_sclk_table->entries[i].clk) {
675 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
676 allowed_vdd_sclk_table->entries[i].clk;
 677 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
678 data->dpm_table.sclk_table.count++;
679 }
680 }
681
682 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
683 "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
684 /* Initialize Mclk DPM table based on allow Mclk values */
685 data->dpm_table.mclk_table.count = 0;
686 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
687 if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
688 allowed_vdd_mclk_table->entries[i].clk) {
689 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
690 allowed_vdd_mclk_table->entries[i].clk;
 691 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
692 data->dpm_table.mclk_table.count++;
693 }
694 }
695
696 /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
697 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
698 data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
699 data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
700 /* param1 is for corresponding std voltage */
701 data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
702 }
703
704 data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
705 allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
706
707 if (NULL != allowed_vdd_mclk_table) {
708 /* Initialize Vddci DPM table based on allow Mclk values */
709 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
710 data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
711 data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
712 }
713 data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
714 }
715
716 allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
717
718 if (NULL != allowed_vdd_mclk_table) {
719 /*
720 * Initialize MVDD DPM table based on allow Mclk
721 * values
722 */
723 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
724 data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
725 data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
726 }
727 data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
728 }
729
730 return 0;
731}
732
733static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
734{
735 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
736 struct phm_ppt_v1_information *table_info =
737 (struct phm_ppt_v1_information *)(hwmgr->pptable);
738 uint32_t i;
739
740 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
741 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
742
743 if (table_info == NULL)
744 return -EINVAL;
745
746 dep_sclk_table = table_info->vdd_dep_on_sclk;
747 dep_mclk_table = table_info->vdd_dep_on_mclk;
748
749 PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
750 "SCLK dependency table is missing.",
751 return -EINVAL);
752 PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
753 "SCLK dependency table count is 0.",
754 return -EINVAL);
755
756 PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
757 "MCLK dependency table is missing.",
758 return -EINVAL);
759 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
760 "MCLK dependency table count is 0",
761 return -EINVAL);
762
763 /* Initialize Sclk DPM table based on allow Sclk values */
764 data->dpm_table.sclk_table.count = 0;
765 for (i = 0; i < dep_sclk_table->count; i++) {
766 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
767 dep_sclk_table->entries[i].clk) {
768
769 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
770 dep_sclk_table->entries[i].clk;
771
772 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
773 (i == 0) ? true : false;
774 data->dpm_table.sclk_table.count++;
775 }
776 }
777
778 /* Initialize Mclk DPM table based on allow Mclk values */
779 data->dpm_table.mclk_table.count = 0;
780 for (i = 0; i < dep_mclk_table->count; i++) {
781 if (i == 0 || data->dpm_table.mclk_table.dpm_levels
782 [data->dpm_table.mclk_table.count - 1].value !=
783 dep_mclk_table->entries[i].clk) {
784 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
785 dep_mclk_table->entries[i].clk;
786 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
787 (i == 0) ? true : false;
788 data->dpm_table.mclk_table.count++;
789 }
790 }
791
792 return 0;
793}
794
 795static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
796{
797 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
798
799 smu7_reset_dpm_tables(hwmgr);
800
801 if (hwmgr->pp_table_version == PP_TABLE_V1)
802 smu7_setup_dpm_tables_v1(hwmgr);
803 else if (hwmgr->pp_table_version == PP_TABLE_V0)
804 smu7_setup_dpm_tables_v0(hwmgr);
805
806 smu7_setup_default_pcie_table(hwmgr);
807
808 /* save a copy of the default DPM table */
809 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
810 sizeof(struct smu7_dpm_table));
811 return 0;
812}
813
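/* Return the reference clock used by the SMU: TCLK when MUX_TCLK_TO_XCLK is set, otherwise the display reference clock, divided by 4 when XTALIN_DIVIDE is active. */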
814uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
815{
816 uint32_t reference_clock, tmp;
817 struct cgs_display_info info = {0};
 818 struct cgs_mode_info mode_info = {0};
819
820 info.mode_info = &mode_info;
821
822 tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
823
824 if (tmp)
825 return TCLK;
826
827 cgs_get_active_displays_info(hwmgr->device, &info);
828 reference_clock = mode_info.ref_clock;
829
830 tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
831
832 if (0 != tmp)
833 return reference_clock / 4;
834
835 return reference_clock;
836}
837
838static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
839{
840
841 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
842 PHM_PlatformCaps_RegulatorHot))
 843 return smum_send_msg_to_smc(hwmgr,
844 PPSMC_MSG_EnableVRHotGPIOInterrupt);
845
846 return 0;
847}
848
849static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
850{
851 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
852 SCLK_PWRMGT_OFF, 0);
853 return 0;
854}
855
856static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
857{
858 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
859
860 if (data->ulv_supported)
 861 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
862
863 return 0;
864}
865
866static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
867{
868 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
869
870 if (data->ulv_supported)
 871 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
872
873 return 0;
874}
875
876static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
877{
878 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
879 PHM_PlatformCaps_SclkDeepSleep)) {
 880 if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
881 PP_ASSERT_WITH_CODE(false,
882 "Attempt to enable Master Deep Sleep switch failed!",
883 return -EINVAL);
884 } else {
 885 if (smum_send_msg_to_smc(hwmgr,
886 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
887 PP_ASSERT_WITH_CODE(false,
888 "Attempt to disable Master Deep Sleep switch failed!",
889 return -EINVAL);
890 }
891 }
892
893 return 0;
894}
895
896static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
897{
898 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
899 PHM_PlatformCaps_SclkDeepSleep)) {
 900 if (smum_send_msg_to_smc(hwmgr,
901 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
902 PP_ASSERT_WITH_CODE(false,
903 "Attempt to disable Master Deep Sleep switch failed!",
904 return -EINVAL);
905 }
906 }
907
908 return 0;
909}
910
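/* Set the UVD MCLK handshake-disable bit in the SMU HandshakeDisables soft register. */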
911static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
912{
913 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
914 uint32_t soft_register_value = 0;
915 uint32_t handshake_disables_offset = data->soft_regs_start
 916 + smum_get_offsetof(hwmgr,
917 SMU_SoftRegisters, HandshakeDisables);
918
919 soft_register_value = cgs_read_ind_register(hwmgr->device,
920 CGS_IND_REG__SMC, handshake_disables_offset);
 921 soft_register_value |= smum_get_mac_definition(hwmgr,
922 SMU_UVD_MCLK_HANDSHAKE_DISABLE);
923 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
924 handshake_disables_offset, soft_register_value);
925 return 0;
926}
927
928static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
929{
930 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
931
932 /* enable SCLK dpm */
933 if (!data->sclk_dpm_key_disabled)
934 PP_ASSERT_WITH_CODE(
 935 (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
936 "Failed to enable SCLK DPM during DPM Start Function!",
937 return -EINVAL);
938
939 /* enable MCLK dpm */
940 if (0 == data->mclk_dpm_key_disabled) {
941 if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
942 smu7_disable_handshake_uvd(hwmgr);
943 PP_ASSERT_WITH_CODE(
 944 (0 == smum_send_msg_to_smc(hwmgr,
945 PPSMC_MSG_MCLKDPM_Enable)),
946 "Failed to enable MCLK DPM during DPM Start Function!",
947 return -EINVAL);
948
949 PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
950
951
952 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
953 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
954 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
955 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
956 udelay(10);
957 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
958 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
959 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
960 } else {
961 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
962 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
963 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
964 udelay(10);
965 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
966 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
967 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
968 }
969 }
970
971 return 0;
972}
973
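/* Enable global power management and SCLK deep sleep, prepare PCIe voltage-change handling, then turn on SCLK/MCLK DPM and, when not disabled, PCIe DPM. */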
974static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
975{
976 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
977
978 /*enable general power management */
979
980 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
981 GLOBAL_PWRMGT_EN, 1);
982
983 /* enable sclk deep sleep */
984
985 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
986 DYNAMIC_PM_EN, 1);
987
988 /* prepare for PCIE DPM */
989
990 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
991 data->soft_regs_start +
 992 smum_get_offsetof(hwmgr, SMU_SoftRegisters,
993 VoltageChangeTimeout), 0x1000);
994 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
995 SWRST_COMMAND_1, RESETLC, 0x0);
996
997 if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
998 cgs_write_register(hwmgr->device, 0x1488,
999 (cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
1000
 1001 if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
 1002 pr_err("Failed to enable Sclk DPM and Mclk DPM!");
1003 return -EINVAL;
1004 }
1005
1006 /* enable PCIE dpm */
1007 if (0 == data->pcie_dpm_key_disabled) {
1008 PP_ASSERT_WITH_CODE(
 1009 (0 == smum_send_msg_to_smc(hwmgr,
1010 PPSMC_MSG_PCIeDPM_Enable)),
1011 "Failed to enable pcie DPM during DPM Start Function!",
1012 return -EINVAL);
1013 }
1014
1015 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1016 PHM_PlatformCaps_Falcon_QuickTransition)) {
 1017 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
1018 PPSMC_MSG_EnableACDCGPIOInterrupt)),
1019 "Failed to enable AC DC GPIO Interrupt!",
1020 );
1021 }
1022
1023 return 0;
1024}
1025
1026static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1027{
1028 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1029
1030 /* disable SCLK dpm */
1031 if (!data->sclk_dpm_key_disabled) {
1032 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1033 "Trying to disable SCLK DPM when DPM is disabled",
1034 return 0);
 1035 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
 1036 }
1037
1038 /* disable MCLK dpm */
1039 if (!data->mclk_dpm_key_disabled) {
1040 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1041 "Trying to disable MCLK DPM when DPM is disabled",
1042 return 0);
 1043 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
1044 }
1045
1046 return 0;
1047}
1048
1049static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
1050{
1051 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1052
1053 /* disable general power management */
1054 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1055 GLOBAL_PWRMGT_EN, 0);
1056 /* disable sclk deep sleep */
1057 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1058 DYNAMIC_PM_EN, 0);
1059
1060 /* disable PCIE dpm */
1061 if (!data->pcie_dpm_key_disabled) {
1062 PP_ASSERT_WITH_CODE(
 1063 (smum_send_msg_to_smc(hwmgr,
1064 PPSMC_MSG_PCIeDPM_Disable) == 0),
1065 "Failed to disable pcie DPM during DPM Stop Function!",
1066 return -EINVAL);
1067 }
1068
1069 smu7_disable_sclk_mclk_dpm(hwmgr);
1070
1071 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1072 "Trying to disable voltage DPM when DPM is disabled",
1073 return 0);
1074
 1075 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);
1076
1077 return 0;
1078}
1079
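/* Select the DPM throttle event source written to CG_THERMAL_CTRL and enable or disable automatic thermal protection to match the active sources. */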
1080static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1081{
1082 bool protection;
1083 enum DPM_EVENT_SRC src;
1084
1085 switch (sources) {
1086 default:
 1087 pr_err("Unknown throttling event sources.");
1088 /* fall through */
1089 case 0:
1090 protection = false;
1091 /* src is unused */
1092 break;
1093 case (1 << PHM_AutoThrottleSource_Thermal):
1094 protection = true;
1095 src = DPM_EVENT_SRC_DIGITAL;
1096 break;
1097 case (1 << PHM_AutoThrottleSource_External):
1098 protection = true;
1099 src = DPM_EVENT_SRC_EXTERNAL;
1100 break;
1101 case (1 << PHM_AutoThrottleSource_External) |
1102 (1 << PHM_AutoThrottleSource_Thermal):
1103 protection = true;
1104 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1105 break;
1106 }
1107 /* Order matters - don't enable thermal protection for the wrong source. */
1108 if (protection) {
1109 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1110 DPM_EVENT_SRC, src);
1111 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1112 THERMAL_PROTECTION_DIS,
1113 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1114 PHM_PlatformCaps_ThermalController));
1115 } else
1116 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1117 THERMAL_PROTECTION_DIS, 1);
1118}
1119
1120static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1121 PHM_AutoThrottleSource source)
1122{
1123 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1124
1125 if (!(data->active_auto_throttle_sources & (1 << source))) {
1126 data->active_auto_throttle_sources |= 1 << source;
1127 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1128 }
1129 return 0;
1130}
1131
1132static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1133{
1134 return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1135}
1136
1137static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1138 PHM_AutoThrottleSource source)
1139{
1140 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1141
1142 if (data->active_auto_throttle_sources & (1 << source)) {
1143 data->active_auto_throttle_sources &= ~(1 << source);
1144 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1145 }
1146 return 0;
1147}
1148
1149static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1150{
1151 return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1152}
1153
 1154static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1155{
1156 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1157 data->pcie_performance_request = true;
1158
1159 return 0;
1160}
1161
 1162static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1163{
1164 int tmp_result = 0;
1165 int result = 0;
1166
1167 tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
1168 PP_ASSERT_WITH_CODE(tmp_result == 0,
1169 "DPM is already running",
1170 );
1171
1172 if (smu7_voltage_control(hwmgr)) {
1173 tmp_result = smu7_enable_voltage_control(hwmgr);
1174 PP_ASSERT_WITH_CODE(tmp_result == 0,
1175 "Failed to enable voltage control!",
1176 result = tmp_result);
1177
1178 tmp_result = smu7_construct_voltage_tables(hwmgr);
1179 PP_ASSERT_WITH_CODE((0 == tmp_result),
1180 "Failed to contruct voltage tables!",
1181 result = tmp_result);
1182 }
1183 smum_initialize_mc_reg_table(hwmgr);
1184
1185 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1186 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1187 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1188 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1189
1190 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1191 PHM_PlatformCaps_ThermalController))
1192 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1193 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1194
1195 tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1196 PP_ASSERT_WITH_CODE((0 == tmp_result),
1197 "Failed to program static screen threshold parameters!",
1198 result = tmp_result);
1199
1200 tmp_result = smu7_enable_display_gap(hwmgr);
1201 PP_ASSERT_WITH_CODE((0 == tmp_result),
1202 "Failed to enable display gap!", result = tmp_result);
1203
1204 tmp_result = smu7_program_voting_clients(hwmgr);
1205 PP_ASSERT_WITH_CODE((0 == tmp_result),
1206 "Failed to program voting clients!", result = tmp_result);
1207
1208 tmp_result = smum_process_firmware_header(hwmgr);
1209 PP_ASSERT_WITH_CODE((0 == tmp_result),
1210 "Failed to process firmware header!", result = tmp_result);
1211
1212 tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1213 PP_ASSERT_WITH_CODE((0 == tmp_result),
1214 "Failed to initialize switch from ArbF0 to F1!",
1215 result = tmp_result);
1216
1217 result = smu7_setup_default_dpm_tables(hwmgr);
1218 PP_ASSERT_WITH_CODE(0 == result,
1219 "Failed to setup default DPM tables!", return result);
1220
1221 tmp_result = smum_init_smc_table(hwmgr);
1222 PP_ASSERT_WITH_CODE((0 == tmp_result),
1223 "Failed to initialize SMC table!", result = tmp_result);
1224
1225 tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1226 PP_ASSERT_WITH_CODE((0 == tmp_result),
1227 "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1228
 1229 smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
1230
1231 tmp_result = smu7_enable_sclk_control(hwmgr);
1232 PP_ASSERT_WITH_CODE((0 == tmp_result),
1233 "Failed to enable SCLK control!", result = tmp_result);
1234
1235 tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1236 PP_ASSERT_WITH_CODE((0 == tmp_result),
1237 "Failed to enable voltage control!", result = tmp_result);
1238
1239 tmp_result = smu7_enable_ulv(hwmgr);
1240 PP_ASSERT_WITH_CODE((0 == tmp_result),
1241 "Failed to enable ULV!", result = tmp_result);
1242
1243 tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1244 PP_ASSERT_WITH_CODE((0 == tmp_result),
1245 "Failed to enable deep sleep master switch!", result = tmp_result);
1246
1247 tmp_result = smu7_enable_didt_config(hwmgr);
1248 PP_ASSERT_WITH_CODE((tmp_result == 0),
1249 "Failed to enable deep sleep master switch!", result = tmp_result);
1250
1251 tmp_result = smu7_start_dpm(hwmgr);
1252 PP_ASSERT_WITH_CODE((0 == tmp_result),
1253 "Failed to start DPM!", result = tmp_result);
1254
1255 tmp_result = smu7_enable_smc_cac(hwmgr);
1256 PP_ASSERT_WITH_CODE((0 == tmp_result),
1257 "Failed to enable SMC CAC!", result = tmp_result);
1258
1259 tmp_result = smu7_enable_power_containment(hwmgr);
1260 PP_ASSERT_WITH_CODE((0 == tmp_result),
1261 "Failed to enable power containment!", result = tmp_result);
1262
1263 tmp_result = smu7_power_control_set_level(hwmgr);
1264 PP_ASSERT_WITH_CODE((0 == tmp_result),
1265 "Failed to power control set level!", result = tmp_result);
1266
1267 tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1268 PP_ASSERT_WITH_CODE((0 == tmp_result),
1269 "Failed to enable thermal auto throttle!", result = tmp_result);
1270
1271 tmp_result = smu7_pcie_performance_request(hwmgr);
1272 PP_ASSERT_WITH_CODE((0 == tmp_result),
1273 "pcie performance request failed!", result = tmp_result);
1274
1275 return 0;
1276}
1277
1278int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1279{
1280 int tmp_result, result = 0;
1281
1282 tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
1283 PP_ASSERT_WITH_CODE(tmp_result == 0,
1284 "DPM is not running right now, no need to disable DPM!",
1285 return 0);
1286
1287 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1288 PHM_PlatformCaps_ThermalController))
1289 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1290 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
1291
1292 tmp_result = smu7_disable_power_containment(hwmgr);
1293 PP_ASSERT_WITH_CODE((tmp_result == 0),
1294 "Failed to disable power containment!", result = tmp_result);
1295
1296 tmp_result = smu7_disable_smc_cac(hwmgr);
1297 PP_ASSERT_WITH_CODE((tmp_result == 0),
1298 "Failed to disable SMC CAC!", result = tmp_result);
1299
1300 tmp_result = smu7_disable_didt_config(hwmgr);
1301 PP_ASSERT_WITH_CODE((tmp_result == 0),
1302 "Failed to disable DIDT!", result = tmp_result);
1303
1304 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1305 CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
1306 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1307 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
1308
1309 tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
1310 PP_ASSERT_WITH_CODE((tmp_result == 0),
1311 "Failed to disable thermal auto throttle!", result = tmp_result);
1312
1313 tmp_result = smu7_avfs_control(hwmgr, false);
1314 PP_ASSERT_WITH_CODE((tmp_result == 0),
1315 "Failed to disable AVFS!", result = tmp_result);
 1316
1317 tmp_result = smu7_stop_dpm(hwmgr);
1318 PP_ASSERT_WITH_CODE((tmp_result == 0),
1319 "Failed to stop DPM!", result = tmp_result);
1320
1321 tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
1322 PP_ASSERT_WITH_CODE((tmp_result == 0),
1323 "Failed to disable deep sleep master switch!", result = tmp_result);
1324
1325 tmp_result = smu7_disable_ulv(hwmgr);
1326 PP_ASSERT_WITH_CODE((tmp_result == 0),
1327 "Failed to disable ULV!", result = tmp_result);
1328
1329 tmp_result = smu7_clear_voting_clients(hwmgr);
1330 PP_ASSERT_WITH_CODE((tmp_result == 0),
1331 "Failed to clear voting clients!", result = tmp_result);
1332
1333 tmp_result = smu7_reset_to_default(hwmgr);
1334 PP_ASSERT_WITH_CODE((tmp_result == 0),
1335 "Failed to reset to default!", result = tmp_result);
1336
1337 tmp_result = smu7_force_switch_to_arbf0(hwmgr);
1338 PP_ASSERT_WITH_CODE((tmp_result == 0),
1339 "Failed to force to switch arbf0!", result = tmp_result);
1340
1341 return result;
1342}
1343
1344int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
1345{
1346
1347 return 0;
1348}
1349
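/* Fill the smu7 backend with defaults: voting clients, voltage-control modes, thermal trip points, PCIe gen/lane bounds and power-gating caps, derived from the feature mask, VBIOS queries and platform capabilities. */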
1350static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1351{
1352 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1353 struct phm_ppt_v1_information *table_info =
1354 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1355 struct cgs_system_info sys_info = {0};
1356 int result;
1357
1358 data->dll_default_on = false;
1359 data->mclk_dpm0_activity_target = 0xa;
1360 data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
1361 data->vddc_vddgfx_delta = 300;
1362 data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1363 data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
1364 data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
 1365 data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
 1366 data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
 1367 data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
 1368 data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
 1369 data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
 1370 data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
 1371 data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
1372
1373 data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
1374 data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
1375 data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
1376 /* need to set voltage control types before EVV patching */
1377 data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
1378 data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
1379 data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
1380 data->enable_tdc_limit_feature = true;
1381 data->enable_pkg_pwr_tracking_feature = true;
1382 data->force_pcie_gen = PP_PCIEGenInvalid;
1383 data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
1384
 1385 if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
1386 uint8_t tmp1, tmp2;
1387 uint16_t tmp3 = 0;
1388 atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
1389 &tmp3);
1390 tmp3 = (tmp3 >> 5) & 0x3;
1391 data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
1392 } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1393 data->vddc_phase_shed_control = 1;
1394 } else {
1395 data->vddc_phase_shed_control = 0;
1396 }
1397
1398 if (hwmgr->chip_id == CHIP_HAWAII) {
1399 data->thermal_temp_setting.temperature_low = 94500;
1400 data->thermal_temp_setting.temperature_high = 95000;
1401 data->thermal_temp_setting.temperature_shutdown = 104000;
1402 } else {
1403 data->thermal_temp_setting.temperature_low = 99500;
1404 data->thermal_temp_setting.temperature_high = 100000;
1405 data->thermal_temp_setting.temperature_shutdown = 104000;
1406 }
1407
 1408 data->fast_watermark_threshold = 100;
 1409 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1410 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
1411 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1412 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1413 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
1414 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1415
1416 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1417 PHM_PlatformCaps_ControlVDDGFX)) {
 1418 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1419 VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
1420 data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1421 }
1422 }
1423
1424 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1425 PHM_PlatformCaps_EnableMVDDControl)) {
 1426 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1427 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
1428 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
 1429 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1430 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
1431 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1432 }
1433
 1434 if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
1435 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1436 PHM_PlatformCaps_ControlVDDGFX);
1437
1438 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1439 PHM_PlatformCaps_ControlVDDCI)) {
 1440 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1441 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
1442 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
 1443 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1444 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
1445 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1446 }
1447
1448 if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
1449 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1450 PHM_PlatformCaps_EnableMVDDControl);
1451
1452 if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
1453 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1454 PHM_PlatformCaps_ControlVDDCI);
1455
 1456 if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
1457 && (table_info->cac_dtp_table->usClockStretchAmount != 0))
1458 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1459 PHM_PlatformCaps_ClockStretcher);
1460
1461 data->pcie_gen_performance.max = PP_PCIEGen1;
1462 data->pcie_gen_performance.min = PP_PCIEGen3;
1463 data->pcie_gen_power_saving.max = PP_PCIEGen1;
1464 data->pcie_gen_power_saving.min = PP_PCIEGen3;
1465 data->pcie_lane_performance.max = 0;
1466 data->pcie_lane_performance.min = 16;
1467 data->pcie_lane_power_saving.max = 0;
1468 data->pcie_lane_power_saving.min = 16;
1469
1470 sys_info.size = sizeof(struct cgs_system_info);
1471 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
1472 result = cgs_query_system_info(hwmgr->device, &sys_info);
1473 if (!result) {
1474 if (sys_info.value & AMD_PG_SUPPORT_UVD)
1475 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1476 PHM_PlatformCaps_UVDPowerGating);
1477 if (sys_info.value & AMD_PG_SUPPORT_VCE)
1478 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1479 PHM_PlatformCaps_VCEPowerGating);
1480 }
1481}
1482
1483/**
1484* Get Leakage VDDC based on leakage ID.
1485*
1486* @param hwmgr the address of the powerplay hardware manager.
1487* @return always 0
1488*/
1489static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1490{
1491 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1492 uint16_t vv_id;
1493 uint16_t vddc = 0;
1494 uint16_t vddgfx = 0;
1495 uint16_t i, j;
1496 uint32_t sclk = 0;
1497 struct phm_ppt_v1_information *table_info =
1498 (struct phm_ppt_v1_information *)hwmgr->pptable;
1499 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1500
1501
1502 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1503 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1504
1505 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1506 if ((hwmgr->pp_table_version == PP_TABLE_V1)
1507 && !phm_get_sclk_for_voltage_evv(hwmgr,
1508 table_info->vddgfx_lookup_table, vv_id, &sclk)) {
1509 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1510 PHM_PlatformCaps_ClockStretcher)) {
1511 sclk_table = table_info->vdd_dep_on_sclk;
1512
1513 for (j = 1; j < sclk_table->count; j++) {
1514 if (sclk_table->entries[j].clk == sclk &&
1515 sclk_table->entries[j].cks_enable == 0) {
1516 sclk += 5000;
1517 break;
1518 }
1519 }
1520 }
1521 if (0 == atomctrl_get_voltage_evv_on_sclk
1522 (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
1523 vv_id, &vddgfx)) {
 1524 /* need to make sure vddgfx is less than 2v, or else it could burn the ASIC. */
1525 PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
1526
1527 /* the voltage should not be zero nor equal to leakage ID */
1528 if (vddgfx != 0 && vddgfx != vv_id) {
1529 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
1530 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
1531 data->vddcgfx_leakage.count++;
1532 }
1533 } else {
1534 					pr_info("Error retrieving EVV voltage value!\n");
1535 }
1536 }
1537 } else {
1538 if ((hwmgr->pp_table_version == PP_TABLE_V0)
1539 || !phm_get_sclk_for_voltage_evv(hwmgr,
1540 table_info->vddc_lookup_table, vv_id, &sclk)) {
1541 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1542 PHM_PlatformCaps_ClockStretcher)) {
1543 if (table_info == NULL)
1544 return -EINVAL;
1545 sclk_table = table_info->vdd_dep_on_sclk;
1546
1547 for (j = 1; j < sclk_table->count; j++) {
1548 if (sclk_table->entries[j].clk == sclk &&
1549 sclk_table->entries[j].cks_enable == 0) {
1550 sclk += 5000;
1551 break;
1552 }
1553 }
1554 }
1555
1556 if (phm_get_voltage_evv_on_sclk(hwmgr,
1557 VOLTAGE_TYPE_VDDC,
1558 sclk, vv_id, &vddc) == 0) {
1559 if (vddc >= 2000 || vddc == 0)
1560 return -EINVAL;
1561 } else {
1562 				pr_debug("failed to retrieve EVV voltage!\n");
1563 continue;
1564 }
1565
1566 /* the voltage should not be zero nor equal to leakage ID */
1567 if (vddc != 0 && vddc != vv_id) {
1568 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
1569 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
1570 data->vddc_leakage.count++;
1571 }
1572 }
1573 }
1574 }
1575
1576 return 0;
1577}
1578
1579/**
1580 * Change virtual leakage voltage to actual value.
1581 *
1582 * @param hwmgr the address of the powerplay hardware manager.
1583 * @param voltage pointer to the voltage value to be patched
1584 * @param leakage_table pointer to the leakage voltage table
1585 */
1586static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
1587 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
1588{
1589 uint32_t index;
1590
1591 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
1592 for (index = 0; index < leakage_table->count; index++) {
1593 /* if this voltage matches a leakage voltage ID */
1594 /* patch with actual leakage voltage */
1595 if (leakage_table->leakage_id[index] == *voltage) {
1596 *voltage = leakage_table->actual_voltage[index];
1597 break;
1598 }
1599 }
1600
1601 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
1602 		pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
1603}
1604
1605/**
1606* Patch voltage lookup table by EVV leakages.
1607*
1608* @param hwmgr the address of the powerplay hardware manager.
1609* @param lookup_table pointer to the voltage lookup table
1610* @param leakage_table pointer to the leakage voltage table
1611* @return always 0
1612*/
1613static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
1614 phm_ppt_v1_voltage_lookup_table *lookup_table,
1615 struct smu7_leakage_voltage *leakage_table)
1616{
1617 uint32_t i;
1618
1619 for (i = 0; i < lookup_table->count; i++)
1620 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1621 &lookup_table->entries[i].us_vdd, leakage_table);
1622
1623 return 0;
1624}
1625
1626static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
1627 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
1628 uint16_t *vddc)
1629{
1630 struct phm_ppt_v1_information *table_info =
1631 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1632 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
1633 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
1634 table_info->max_clock_voltage_on_dc.vddc;
1635 return 0;
1636}
1637
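/*
 * Resolve the vddInd/vddcInd indices in the SCLK, MCLK and MM dependency
 * tables into absolute voltages taken from the corresponding lookup tables.
 */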
1638static int smu7_patch_voltage_dependency_tables_with_lookup_table(
1639 struct pp_hwmgr *hwmgr)
1640{
1641 uint8_t entry_id;
1642 uint8_t voltage_id;
1643 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1644 struct phm_ppt_v1_information *table_info =
1645 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1646
1647 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1648 table_info->vdd_dep_on_sclk;
1649 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
1650 table_info->vdd_dep_on_mclk;
1651 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1652 table_info->mm_dep_table;
1653
1654 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1655 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1656 voltage_id = sclk_table->entries[entry_id].vddInd;
1657 sclk_table->entries[entry_id].vddgfx =
1658 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
1659 }
1660 } else {
1661 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1662 voltage_id = sclk_table->entries[entry_id].vddInd;
1663 sclk_table->entries[entry_id].vddc =
1664 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1665 }
1666 }
1667
1668 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1669 voltage_id = mclk_table->entries[entry_id].vddInd;
1670 mclk_table->entries[entry_id].vddc =
1671 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1672 }
1673
1674 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
1675 voltage_id = mm_table->entries[entry_id].vddcInd;
1676 mm_table->entries[entry_id].vddc =
1677 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1678 }
1679
1680 return 0;
1681
1682}
1683
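/*
 * Append a calculated voltage record to a lookup table, reusing an existing
 * slot when the same voltage is already present instead of growing the table.
 */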
1684static int phm_add_voltage(struct pp_hwmgr *hwmgr,
1685 phm_ppt_v1_voltage_lookup_table *look_up_table,
1686 phm_ppt_v1_voltage_lookup_record *record)
1687{
1688 uint32_t i;
1689
1690 PP_ASSERT_WITH_CODE((NULL != look_up_table),
1691 "Lookup Table empty.", return -EINVAL);
1692 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
1693 "Lookup Table empty.", return -EINVAL);
1694
1695 	i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
1696 PP_ASSERT_WITH_CODE((i >= look_up_table->count),
1697 "Lookup Table is full.", return -EINVAL);
1698
1699 /* This is to avoid entering duplicate calculated records. */
1700 for (i = 0; i < look_up_table->count; i++) {
1701 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
1702 if (look_up_table->entries[i].us_calculated == 1)
1703 return 0;
1704 break;
1705 }
1706 }
1707
1708 look_up_table->entries[i].us_calculated = 1;
1709 look_up_table->entries[i].us_vdd = record->us_vdd;
1710 look_up_table->entries[i].us_cac_low = record->us_cac_low;
1711 look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
1712 look_up_table->entries[i].us_cac_high = record->us_cac_high;
1713 /* Only increment the count when we're appending, not replacing duplicate entry. */
1714 if (i == look_up_table->count)
1715 look_up_table->count++;
1716
1717 return 0;
1718}
1719
1720
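/*
 * When VDDGFX is controlled separately (SVID2), derive the missing VDDC (for
 * SCLK levels) and VDDGFX (for MCLK levels) entries from the vdd_offset
 * fields; bit 15 of the offset marks a negative value.
 */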
1721static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
1722{
1723 uint8_t entry_id;
1724 struct phm_ppt_v1_voltage_lookup_record v_record;
1725 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1726 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1727
1728 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
1729 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
1730
1731 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1732 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1733 if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
1734 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1735 sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1736 else
1737 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1738 sclk_table->entries[entry_id].vdd_offset;
1739
1740 sclk_table->entries[entry_id].vddc =
1741 v_record.us_cac_low = v_record.us_cac_mid =
1742 v_record.us_cac_high = v_record.us_vdd;
1743
1744 phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
1745 }
1746
1747 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1748 if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
1749 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1750 mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1751 else
1752 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1753 mclk_table->entries[entry_id].vdd_offset;
1754
1755 mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1756 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1757 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1758 }
1759 }
1760 return 0;
1761}
1762
1763static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
1764{
1765 uint8_t entry_id;
1766 struct phm_ppt_v1_voltage_lookup_record v_record;
1767 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1768 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1769 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1770
1771 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1772 for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
1773 if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
1774 v_record.us_vdd = mm_table->entries[entry_id].vddc +
1775 mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
1776 else
1777 v_record.us_vdd = mm_table->entries[entry_id].vddc +
1778 mm_table->entries[entry_id].vddgfx_offset;
1779
1780 /* Add the calculated VDDGFX to the VDDGFX lookup table */
1781 mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1782 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1783 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1784 }
1785 }
1786 return 0;
1787}
1788
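/* Sort a voltage lookup table in ascending us_vdd order (insertion sort). */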
1789static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
1790 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
1791{
1792 uint32_t table_size, i, j;
1793 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
1794 table_size = lookup_table->count;
1795
1796 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
1797 "Lookup table is empty", return -EINVAL);
1798
1799 /* Sorting voltages */
1800 for (i = 0; i < table_size - 1; i++) {
1801 for (j = i + 1; j > 0; j--) {
1802 if (lookup_table->entries[j].us_vdd <
1803 lookup_table->entries[j - 1].us_vdd) {
1804 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
1805 lookup_table->entries[j - 1] = lookup_table->entries[j];
1806 lookup_table->entries[j] = tmp_voltage_lookup_record;
1807 }
1808 }
1809 }
1810
1811 return 0;
1812}
1813
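/*
 * Patch the lookup tables with the collected leakage voltages, resolve the
 * dependency tables against them, derive any missing VDDC/VDDGFX records and
 * finally sort the lookup tables by voltage.
 */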
1814static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
1815{
1816 int result = 0;
1817 int tmp_result;
1818 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1819 struct phm_ppt_v1_information *table_info =
1820 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1821
1822 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1823 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1824 table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
1825 if (tmp_result != 0)
1826 result = tmp_result;
1827
1828 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1829 &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
1830 } else {
1831
1832 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1833 table_info->vddc_lookup_table, &(data->vddc_leakage));
1834 if (tmp_result)
1835 result = tmp_result;
1836
1837 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
1838 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
1839 if (tmp_result)
1840 result = tmp_result;
1841 }
1842
1843 tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
1844 if (tmp_result)
1845 result = tmp_result;
1846
1847 tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
1848 if (tmp_result)
1849 result = tmp_result;
1850
1851 tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
1852 if (tmp_result)
1853 result = tmp_result;
1854
1855 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
1856 if (tmp_result)
1857 result = tmp_result;
1858
1859 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
1860 if (tmp_result)
1861 result = tmp_result;
1862
1863 return result;
1864}
1865
1866static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
1867{
1868 struct phm_ppt_v1_information *table_info =
1869 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1870
1871 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
1872 table_info->vdd_dep_on_sclk;
1873 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
1874 table_info->vdd_dep_on_mclk;
1875
1876 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
1877 "VDD dependency on SCLK table is missing.",
1878 return -EINVAL);
1879 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
1880 "VDD dependency on SCLK table has to have is missing.",
1881 return -EINVAL);
1882
1883 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
1884 "VDD dependency on MCLK table is missing",
1885 return -EINVAL);
1886 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
1887 "VDD dependency on MCLK table has to have is missing.",
1888 return -EINVAL);
1889
1890 table_info->max_clock_voltage_on_ac.sclk =
1891 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
1892 table_info->max_clock_voltage_on_ac.mclk =
1893 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
1894 table_info->max_clock_voltage_on_ac.vddc =
1895 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
1896 table_info->max_clock_voltage_on_ac.vddci =
1897 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
1898
1899 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
1900 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
1901 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
1902 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
1903
1904 return 0;
1905}
1906
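/*
 * Board-specific workaround: on certain Polaris10 boards (matched by the PCI
 * revision and subsystem IDs below), repoint the top MCLK DPM level to a
 * lookup entry of at least 1000 mV if it currently references a lower VDDC.
 */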
f8a4c11b 1907static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
599a7e9f
RZ
1908{
1909 struct phm_ppt_v1_information *table_info =
1910 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1911 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
1912 struct phm_ppt_v1_voltage_lookup_table *lookup_table;
1913 uint32_t i;
1914 uint32_t hw_revision, sub_vendor_id, sub_sys_id;
1915 struct cgs_system_info sys_info = {0};
1916
1917 if (table_info != NULL) {
1918 dep_mclk_table = table_info->vdd_dep_on_mclk;
1919 lookup_table = table_info->vddc_lookup_table;
1920 } else
1921 return 0;
1922
1923 sys_info.size = sizeof(struct cgs_system_info);
1924
1925 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
1926 cgs_query_system_info(hwmgr->device, &sys_info);
1927 hw_revision = (uint32_t)sys_info.value;
1928
1929 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
1930 cgs_query_system_info(hwmgr->device, &sys_info);
1931 sub_sys_id = (uint32_t)sys_info.value;
1932
1933 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
1934 cgs_query_system_info(hwmgr->device, &sys_info);
1935 sub_vendor_id = (uint32_t)sys_info.value;
1936
1937 if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
1938 ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
1939 (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
1940 (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
1941 if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
1942 return 0;
1943
1944 for (i = 0; i < lookup_table->count; i++) {
1945 if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
1946 dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
1947 return 0;
1948 }
1949 }
1950 }
1951 return 0;
1952}
1953
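/*
 * Route the VDDC PCC GPIO pin to the matching CNB_PWRMGT_CNTL field and seed
 * the fan-control and operating-temperature defaults from the CAC/DTP table.
 */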
1954static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
1955{
1956 struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
1957 uint32_t temp_reg;
1958 struct phm_ppt_v1_information *table_info =
1959 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1960
1961
1962 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
1963 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
1964 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
1965 case 0:
1966 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
1967 break;
1968 case 1:
1969 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
1970 break;
1971 case 2:
1972 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
1973 break;
1974 case 3:
1975 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
1976 break;
1977 case 4:
1978 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
1979 break;
1980 default:
1981 break;
1982 }
1983 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
1984 }
1985
1986 if (table_info == NULL)
1987 return 0;
1988
1989 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
1990 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
1991 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
1992 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
1993
1994 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
1995 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
1996
1997 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
1998
1999 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
2000
2001 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
2002 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2003
2004 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
2005
2006 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
2007 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
2008
2009 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2010 table_info->cac_dtp_table->usOperatingTempStep = 1;
2011 table_info->cac_dtp_table->usOperatingTempHyst = 1;
2012
2013 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
2014 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2015
2016 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
2017 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
2018
2019 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
2020 table_info->cac_dtp_table->usOperatingTempMinLimit;
2021
2022 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
2023 table_info->cac_dtp_table->usOperatingTempMaxLimit;
2024
2025 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
2026 table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2027
2028 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
2029 table_info->cac_dtp_table->usOperatingTempStep;
2030
2031 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
2032 table_info->cac_dtp_table->usTargetOperatingTemp;
2033 if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
2034 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2035 PHM_PlatformCaps_ODFuzzyFanControlSupport);
2036 }
2037
2038 return 0;
2039}
2040
2041/**
2042 * Change virtual leakage voltage to actual value.
2043 *
2044 * @param hwmgr the address of the powerplay hardware manager.
2045 * @param voltage pointer to the voltage value to be patched
2046 * @param leakage_table pointer to the leakage voltage table
2047 */
2048static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2049 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2050{
2051 uint32_t index;
2052
2053 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2054 for (index = 0; index < leakage_table->count; index++) {
2055 /* if this voltage matches a leakage voltage ID */
2056 /* patch with actual leakage voltage */
2057 if (leakage_table->leakage_id[index] == *voltage) {
2058 *voltage = leakage_table->actual_voltage[index];
2059 break;
2060 }
2061 }
2062
2063 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2064 		pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
2065}
2066
2067
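/*
 * The helpers below walk the legacy (v0) dependency tables and replace any
 * virtual leakage IDs with the measured leakage voltages collected earlier.
 */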
2068static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2069 struct phm_clock_voltage_dependency_table *tab)
2070{
2071 uint16_t i;
2072 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2073
2074 if (tab)
2075 for (i = 0; i < tab->count; i++)
2076 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2077 &data->vddc_leakage);
2078
2079 return 0;
2080}
2081
2082static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2083 struct phm_clock_voltage_dependency_table *tab)
2084{
2085 uint16_t i;
2086 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2087
2088 if (tab)
2089 for (i = 0; i < tab->count; i++)
2090 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2091 &data->vddci_leakage);
2092
2093 return 0;
2094}
2095
2096static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2097 struct phm_vce_clock_voltage_dependency_table *tab)
2098{
2099 uint16_t i;
2100 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2101
2102 if (tab)
2103 for (i = 0; i < tab->count; i++)
2104 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2105 &data->vddc_leakage);
2106
2107 return 0;
2108}
2109
2110
2111static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2112 struct phm_uvd_clock_voltage_dependency_table *tab)
2113{
2114 uint16_t i;
2115 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2116
2117 if (tab)
2118 for (i = 0; i < tab->count; i++)
2119 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2120 &data->vddc_leakage);
2121
2122 return 0;
2123}
2124
2125static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2126 struct phm_phase_shedding_limits_table *tab)
2127{
2128 uint16_t i;
2129 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2130
2131 if (tab)
2132 for (i = 0; i < tab->count; i++)
2133 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2134 &data->vddc_leakage);
2135
2136 return 0;
2137}
2138
2139static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2140 struct phm_samu_clock_voltage_dependency_table *tab)
2141{
2142 uint16_t i;
2143 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2144
2145 if (tab)
2146 for (i = 0; i < tab->count; i++)
2147 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2148 &data->vddc_leakage);
2149
2150 return 0;
2151}
2152
2153static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2154 struct phm_acp_clock_voltage_dependency_table *tab)
2155{
2156 uint16_t i;
2157 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2158
2159 if (tab)
2160 for (i = 0; i < tab->count; i++)
2161 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2162 &data->vddc_leakage);
2163
2164 return 0;
2165}
2166
2167static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2168 		struct phm_clock_and_voltage_limits *tab)
2169{
2170 	uint32_t vddc, vddci;
2171 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2172
2173 if (tab) {
2174 		vddc = tab->vddc;
2175 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2176 &data->vddc_leakage);
2177 tab->vddc = vddc;
2178 		vddci = tab->vddci;
2179 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2180 &data->vddci_leakage);
2181 tab->vddci = vddci;
2182 }
2183
2184 return 0;
2185}
2186
2187static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2188{
2189 uint32_t i;
2190 uint32_t vddc;
2191 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2192
2193 if (tab) {
2194 for (i = 0; i < tab->count; i++) {
2195 vddc = (uint32_t)(tab->entries[i].Vddc);
2196 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2197 tab->entries[i].Vddc = (uint16_t)vddc;
2198 }
2199 }
2200
2201 return 0;
2202}
2203
2204static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2205{
2206 int tmp;
2207
2208 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2209 if (tmp)
2210 return -EINVAL;
2211
2212 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2213 if (tmp)
2214 return -EINVAL;
2215
2216 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2217 if (tmp)
2218 return -EINVAL;
2219
2220 tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2221 if (tmp)
2222 return -EINVAL;
2223
2224 tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2225 if (tmp)
2226 return -EINVAL;
2227
2228 tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2229 if (tmp)
2230 return -EINVAL;
2231
2232 tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2233 if (tmp)
2234 return -EINVAL;
2235
2236 tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2237 if (tmp)
2238 return -EINVAL;
2239
2240 tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2241 if (tmp)
2242 return -EINVAL;
2243
2244 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2245 if (tmp)
2246 return -EINVAL;
2247
2248 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2249 if (tmp)
2250 return -EINVAL;
2251
2252 tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2253 if (tmp)
2254 return -EINVAL;
2255
2256 return 0;
2257}
2258
2259
2260static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2261{
2262 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2263
2264 struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2265 struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2266 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2267
2268 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2269 "VDDC dependency on SCLK table is missing. This table is mandatory",
2270 return -EINVAL);
2271 	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2272 		"VDDC dependency on SCLK table has to have at least one entry. This table is mandatory",
2273 return -EINVAL);
2274
2275 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2276 "VDDC dependency on MCLK table is missing. This table is mandatory",
2277 return -EINVAL);
2278 	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2279 		"VDDC dependency on MCLK table has to have at least one entry. This table is mandatory",
2280 return -EINVAL);
2281
2282 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2283 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2284
2285 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2286 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2287 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2288 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2289 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2290 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2291
2292 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2293 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2294 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2295 }
2296
2297 	if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2298 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2299
2300 return 0;
2301}
2302
2303static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2304{
2305 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2306 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2307 kfree(hwmgr->backend);
2308 hwmgr->backend = NULL;
2309
2310 return 0;
2311}
2312
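/*
 * Fallback when EVV is not supported: translate the efuse leakage ID into
 * actual VDDC/VDDCI leakage voltages for each virtual voltage ID.
 */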
2313static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2314{
2315 uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2316 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2317 int i;
2318
2319 if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2320 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2321 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2322 if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2323 virtual_voltage_id,
2324 efuse_voltage_id) == 0) {
2325 if (vddc != 0 && vddc != virtual_voltage_id) {
2326 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2327 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2328 data->vddc_leakage.count++;
2329 }
2330 if (vddci != 0 && vddci != virtual_voltage_id) {
2331 data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2332 data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2333 data->vddci_leakage.count++;
2334 }
2335 }
2336 }
2337 }
2338 return 0;
2339}
2340
f8a4c11b 2341static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
599a7e9f
RZ
2342{
2343 struct smu7_hwmgr *data;
2344 	int result = 0;
2345
2346 data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2347 if (data == NULL)
2348 return -ENOMEM;
2349
2350 hwmgr->backend = data;
2351 smu7_patch_voltage_workaround(hwmgr);
2352 smu7_init_dpm_defaults(hwmgr);
2353
2354 /* Get leakage voltage based on leakage ID. */
2355 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2356 PHM_PlatformCaps_EVV)) {
2357 result = smu7_get_evv_voltages(hwmgr);
2358 if (result) {
2359 pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
2360 return -EINVAL;
2361 }
2362 } else {
2363 smu7_get_elb_voltages(hwmgr);
2364 }
2365
2366 if (hwmgr->pp_table_version == PP_TABLE_V1) {
2367 smu7_complete_dependency_tables(hwmgr);
2368 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2369 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2370 smu7_patch_dependency_tables_with_leakage(hwmgr);
2371 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2372 }
2373
2374 	/* Initialize Dynamic State Adjustment Rule Settings */
2375 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2376
2377 if (0 == result) {
2378 struct cgs_system_info sys_info = {0};
2379
2380 data->is_tlu_enabled = false;
2381
2382 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2383 SMU7_MAX_HARDWARE_POWERLEVELS;
2384 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2385 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2386
2387 sys_info.size = sizeof(struct cgs_system_info);
2388 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
2389 result = cgs_query_system_info(hwmgr->device, &sys_info);
2390 if (result)
2391 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2392 else
2393 data->pcie_gen_cap = (uint32_t)sys_info.value;
2394 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2395 data->pcie_spc_cap = 20;
2396 sys_info.size = sizeof(struct cgs_system_info);
2397 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
2398 result = cgs_query_system_info(hwmgr->device, &sys_info);
2399 if (result)
2400 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2401 else
2402 data->pcie_lane_cap = (uint32_t)sys_info.value;
2403
2404 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2405/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2406 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2407 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2408 smu7_thermal_parameter_init(hwmgr);
2409 } else {
2410 /* Ignore return value in here, we are cleaning up a mess. */
2411 		smu7_hwmgr_backend_fini(hwmgr);
2412 }
2413
2414 return 0;
2415}
2416
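/*
 * Force each enabled DPM block (PCIe, SCLK, MCLK) to its highest enabled
 * level by locating the top set bit of the corresponding enable mask.
 */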
2417static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2418{
2419 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2420 uint32_t level, tmp;
2421
2422 if (!data->pcie_dpm_key_disabled) {
2423 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2424 level = 0;
2425 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2426 while (tmp >>= 1)
2427 level++;
2428
2429 if (level)
2430 				smum_send_msg_to_smc_with_parameter(hwmgr,
2431 PPSMC_MSG_PCIeDPM_ForceLevel, level);
2432 }
2433 }
2434
2435 if (!data->sclk_dpm_key_disabled) {
2436 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2437 level = 0;
2438 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2439 while (tmp >>= 1)
2440 level++;
2441
2442 if (level)
2443 				smum_send_msg_to_smc_with_parameter(hwmgr,
2444 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2445 (1 << level));
2446 }
2447 }
2448
2449 if (!data->mclk_dpm_key_disabled) {
2450 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2451 level = 0;
2452 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2453 while (tmp >>= 1)
2454 level++;
2455
2456 if (level)
2457 				smum_send_msg_to_smc_with_parameter(hwmgr,
2458 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2459 (1 << level));
2460 }
2461 }
2462
2463 return 0;
2464}
2465
2466static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2467{
2468 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2469
2470 if (hwmgr->pp_table_version == PP_TABLE_V1)
2471 phm_apply_dal_min_voltage_request(hwmgr);
2472/* TODO for v0 Iceland and CI */
2473
2474 if (!data->sclk_dpm_key_disabled) {
2475 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2476 			smum_send_msg_to_smc_with_parameter(hwmgr,
2477 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2478 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2479 }
2480
2481 if (!data->mclk_dpm_key_disabled) {
2482 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2483 			smum_send_msg_to_smc_with_parameter(hwmgr,
2484 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2485 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2486 }
2487
2488 return 0;
2489}
2490
2491static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2492{
2493 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2494
2495 if (!smum_is_dpm_running(hwmgr))
2496 return -EINVAL;
2497
2498 if (!data->pcie_dpm_key_disabled) {
2499 		smum_send_msg_to_smc(hwmgr,
2500 PPSMC_MSG_PCIeDPM_UnForceLevel);
2501 }
2502
2503 return smu7_upload_dpm_level_enable_mask(hwmgr);
2504}
2505
2506static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2507{
2508 struct smu7_hwmgr *data =
2509 (struct smu7_hwmgr *)(hwmgr->backend);
2510 uint32_t level;
2511
2512 if (!data->sclk_dpm_key_disabled)
2513 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2514 level = phm_get_lowest_enabled_level(hwmgr,
2515 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2516 			smum_send_msg_to_smc_with_parameter(hwmgr,
2517 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2518 (1 << level));
2519
2520 }
2521
2522 if (!data->mclk_dpm_key_disabled) {
2523 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2524 level = phm_get_lowest_enabled_level(hwmgr,
2525 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2526 			smum_send_msg_to_smc_with_parameter(hwmgr,
2527 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2528 (1 << level));
2529 }
2530 }
2531
2532 if (!data->pcie_dpm_key_disabled) {
2533 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2534 level = phm_get_lowest_enabled_level(hwmgr,
2535 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2536 			smum_send_msg_to_smc_with_parameter(hwmgr,
2537 PPSMC_MSG_PCIeDPM_ForceLevel,
2538 (level));
2539 }
2540 }
2541
2542 return 0;
2543}
2544
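/*
 * Derive the SCLK/MCLK/PCIe level masks used for the profiling DPM levels
 * from the golden DPM table: a memory level just below the peak (or the only
 * one), an engine clock scaled by the golden SCLK:MCLK ratio, and the top
 * PCIe level; the PROFILE_MIN and PROFILE_PEAK requests override these masks.
 */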
2545static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
2546 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
2547{
2548 uint32_t percentage;
2549 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2550 struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
2551 int32_t tmp_mclk;
2552 int32_t tmp_sclk;
2553 int32_t count;
2554
2555 if (golden_dpm_table->mclk_table.count < 1)
2556 return -EINVAL;
2557
2558 percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
2559 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
2560
2561 if (golden_dpm_table->mclk_table.count == 1) {
2562 percentage = 70;
2563 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
2564 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
2565 } else {
2566 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
2567 *mclk_mask = golden_dpm_table->mclk_table.count - 2;
2568 }
2569
2570 tmp_sclk = tmp_mclk * percentage / 100;
2571
2572 if (hwmgr->pp_table_version == PP_TABLE_V0) {
2573 for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
2574 count >= 0; count--) {
2575 if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
2576 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
2577 *sclk_mask = count;
2578 break;
2579 }
2580 }
2581 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
2582 *sclk_mask = 0;
2583
2584 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2585 *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
2586 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
2587 struct phm_ppt_v1_information *table_info =
2588 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2589
2590 for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
2591 if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
2592 tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
2593 *sclk_mask = count;
2594 break;
2595 }
2596 }
2597 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
2598 *sclk_mask = 0;
2599
2600 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2601 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
2602 }
2603
2604 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
2605 *mclk_mask = 0;
2606 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2607 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
2608
2609 *pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
2610 return 0;
2611}
2612
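/*
 * Dispatcher for the forced DPM level: HIGH, LOW and AUTO map to the helpers
 * above, while the PROFILE levels force explicit SCLK/MCLK/PCIe masks and
 * raise the fan to 100% for PROFILE_PEAK.
 */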
2613static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2614 enum amd_dpm_forced_level level)
2615{
2616 int ret = 0;
2617 uint32_t sclk_mask = 0;
2618 uint32_t mclk_mask = 0;
2619 uint32_t pcie_mask = 0;
2620
2621 switch (level) {
2622 case AMD_DPM_FORCED_LEVEL_HIGH:
2623 ret = smu7_force_dpm_highest(hwmgr);
2624 break;
2625 case AMD_DPM_FORCED_LEVEL_LOW:
2626 ret = smu7_force_dpm_lowest(hwmgr);
2627 break;
2628 case AMD_DPM_FORCED_LEVEL_AUTO:
2629 ret = smu7_unforce_dpm_levels(hwmgr);
2630 		break;
2631 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2632 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2633 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2634 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2635 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2636 if (ret)
2637 return ret;
2638 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
2639 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
2640 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
2641 break;
2642 	case AMD_DPM_FORCED_LEVEL_MANUAL:
2643 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2644 default:
2645 break;
2646 }
2647
2648 if (!ret) {
2649 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2650 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
2651 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2652 smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
2653 }
2654 return ret;
2655}
2656
2657static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
2658{
2659 return sizeof(struct smu7_power_state);
2660}
2661
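/*
 * Report whether the vblank interval is too short to hide an MCLK switch;
 * the threshold depends on the ASIC and on whether GDDR5 memory is used.
 */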
2662static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
2663 uint32_t vblank_time_us)
2664{
2665 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2666 uint32_t switch_limit_us;
2667
2668 switch (hwmgr->chip_id) {
2669 case CHIP_POLARIS10:
2670 case CHIP_POLARIS11:
2671 case CHIP_POLARIS12:
2672 switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
2673 break;
2674 default:
2675 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2676 break;
2677 }
2678
2679 if (vblank_time_us < switch_limit_us)
2680 return true;
2681 else
2682 return false;
2683}
2684
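/*
 * Clamp the requested power state against the AC/DC limits, the current
 * display configuration and the stable-pstate constraints, and decide
 * whether MCLK switching must be disabled.
 */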
2685static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2686 struct pp_power_state *request_ps,
2687 const struct pp_power_state *current_ps)
2688{
2689
2690 struct smu7_power_state *smu7_ps =
2691 cast_phw_smu7_power_state(&request_ps->hardware);
2692 uint32_t sclk;
2693 uint32_t mclk;
2694 struct PP_Clocks minimum_clocks = {0};
2695 bool disable_mclk_switching;
2696 bool disable_mclk_switching_for_frame_lock;
2697 struct cgs_display_info info = {0};
2698 	struct cgs_mode_info mode_info = {0};
2699 const struct phm_clock_and_voltage_limits *max_limits;
2700 uint32_t i;
2701 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2702 struct phm_ppt_v1_information *table_info =
2703 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2704 int32_t count;
2705 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2706
2707 	info.mode_info = &mode_info;
2708 data->battery_state = (PP_StateUILabel_Battery ==
2709 request_ps->classification.ui_label);
2710
2711 PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
2712 "VI should always have 2 performance levels",
2713 );
2714
2715 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
2716 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2717 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
2718
2719 /* Cap clock DPM tables at DC MAX if it is in DC. */
2720 if (PP_PowerSource_DC == hwmgr->power_source) {
2721 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2722 if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
2723 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
2724 if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
2725 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
2726 }
2727 }
2728
2729 cgs_get_active_displays_info(hwmgr->device, &info);
2730
2731 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
2732 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
2733
2734 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2735 PHM_PlatformCaps_StablePState)) {
2736 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2737 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
2738
2739 for (count = table_info->vdd_dep_on_sclk->count - 1;
2740 count >= 0; count--) {
2741 if (stable_pstate_sclk >=
2742 table_info->vdd_dep_on_sclk->entries[count].clk) {
2743 stable_pstate_sclk =
2744 table_info->vdd_dep_on_sclk->entries[count].clk;
2745 break;
2746 }
2747 }
2748
2749 if (count < 0)
2750 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2751
2752 stable_pstate_mclk = max_limits->mclk;
2753
2754 minimum_clocks.engineClock = stable_pstate_sclk;
2755 minimum_clocks.memoryClock = stable_pstate_mclk;
2756 }
2757
2758 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
2759 hwmgr->platform_descriptor.platformCaps,
2760 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2761
2762
2763 disable_mclk_switching = ((1 < info.display_count) ||
2764 disable_mclk_switching_for_frame_lock ||
2765 smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
2766 (mode_info.refresh_rate > 120));
2767
2768 sclk = smu7_ps->performance_levels[0].engine_clock;
2769 mclk = smu7_ps->performance_levels[0].memory_clock;
2770
2771 if (disable_mclk_switching)
2772 mclk = smu7_ps->performance_levels
2773 [smu7_ps->performance_level_count - 1].memory_clock;
2774
2775 if (sclk < minimum_clocks.engineClock)
2776 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
2777 max_limits->sclk : minimum_clocks.engineClock;
2778
2779 if (mclk < minimum_clocks.memoryClock)
2780 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
2781 max_limits->mclk : minimum_clocks.memoryClock;
2782
2783 smu7_ps->performance_levels[0].engine_clock = sclk;
2784 smu7_ps->performance_levels[0].memory_clock = mclk;
2785
2786 smu7_ps->performance_levels[1].engine_clock =
2787 (smu7_ps->performance_levels[1].engine_clock >=
2788 smu7_ps->performance_levels[0].engine_clock) ?
2789 smu7_ps->performance_levels[1].engine_clock :
2790 smu7_ps->performance_levels[0].engine_clock;
2791
2792 if (disable_mclk_switching) {
2793 if (mclk < smu7_ps->performance_levels[1].memory_clock)
2794 mclk = smu7_ps->performance_levels[1].memory_clock;
2795
2796 smu7_ps->performance_levels[0].memory_clock = mclk;
2797 smu7_ps->performance_levels[1].memory_clock = mclk;
2798 } else {
2799 if (smu7_ps->performance_levels[1].memory_clock <
2800 smu7_ps->performance_levels[0].memory_clock)
2801 smu7_ps->performance_levels[1].memory_clock =
2802 smu7_ps->performance_levels[0].memory_clock;
2803 }
2804
2805 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2806 PHM_PlatformCaps_StablePState)) {
2807 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2808 smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
2809 smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
2810 smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
2811 smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
2812 }
2813 }
2814 return 0;
2815}
2816
2817
f93f0c3a 2818static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
599a7e9f
RZ
2819{
2820 struct pp_power_state *ps;
2821 struct smu7_power_state *smu7_ps;
2822
2823 if (hwmgr == NULL)
2824 return -EINVAL;
2825
2826 ps = hwmgr->request_ps;
2827
2828 if (ps == NULL)
2829 return -EINVAL;
2830
2831 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2832
2833 if (low)
2834 return smu7_ps->performance_levels[0].memory_clock;
2835 else
2836 return smu7_ps->performance_levels
2837 [smu7_ps->performance_level_count-1].memory_clock;
2838}
2839
f93f0c3a 2840static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
599a7e9f
RZ
2841{
2842 struct pp_power_state *ps;
2843 struct smu7_power_state *smu7_ps;
2844
2845 if (hwmgr == NULL)
2846 return -EINVAL;
2847
2848 ps = hwmgr->request_ps;
2849
2850 if (ps == NULL)
2851 return -EINVAL;
2852
2853 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2854
2855 if (low)
2856 return smu7_ps->performance_levels[0].engine_clock;
2857 else
2858 return smu7_ps->performance_levels
2859 [smu7_ps->performance_level_count-1].engine_clock;
2860}
2861
2862static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
2863 struct pp_hw_power_state *hw_ps)
2864{
2865 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2866 struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
2867 ATOM_FIRMWARE_INFO_V2_2 *fw_info;
2868 uint16_t size;
2869 uint8_t frev, crev;
2870 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
2871
2872 /* First retrieve the Boot clocks and VDDC from the firmware info table.
2873 * We assume here that fw_info is unchanged if this call fails.
2874 */
2875 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
2876 hwmgr->device, index,
2877 &size, &frev, &crev);
2878 if (!fw_info)
2879 /* During a test, there is no firmware info table. */
2880 return 0;
2881
2882 /* Patch the state. */
2883 data->vbios_boot_state.sclk_bootup_value =
2884 le32_to_cpu(fw_info->ulDefaultEngineClock);
2885 data->vbios_boot_state.mclk_bootup_value =
2886 le32_to_cpu(fw_info->ulDefaultMemoryClock);
2887 data->vbios_boot_state.mvdd_bootup_value =
2888 le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
2889 data->vbios_boot_state.vddc_bootup_value =
2890 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
2891 data->vbios_boot_state.vddci_bootup_value =
2892 le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
2893 data->vbios_boot_state.pcie_gen_bootup_value =
2894 smu7_get_current_pcie_speed(hwmgr);
2895
2896 data->vbios_boot_state.pcie_lane_bootup_value =
2897 (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
2898
2899 /* set boot power state */
2900 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
2901 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
2902 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
2903 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
2904
2905 return 0;
2906}
2907
2908static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
2909{
2910 int result;
2911 unsigned long ret = 0;
2912
2913 if (hwmgr->pp_table_version == PP_TABLE_V0) {
2914 result = pp_tables_get_num_of_entries(hwmgr, &ret);
2915 return result ? 0 : ret;
2916 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
2917 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
2918 return result;
2919 }
2920 return 0;
2921}
2922
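/*
 * Translate one ATOM_Tonga_State powerplay table entry into a smu7 power
 * state with two performance levels (low and high), resolving SCLK, MCLK and
 * PCIe settings through the dependency sub-tables.
 */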
2923static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
2924 void *state, struct pp_power_state *power_state,
2925 void *pp_table, uint32_t classification_flag)
2926{
2927 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2928 struct smu7_power_state *smu7_power_state =
2929 (struct smu7_power_state *)(&(power_state->hardware));
2930 struct smu7_performance_level *performance_level;
2931 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
2932 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
2933 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
2934 PPTable_Generic_SubTable_Header *sclk_dep_table =
2935 (PPTable_Generic_SubTable_Header *)
2936 (((unsigned long)powerplay_table) +
2937 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
2938
2939 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
2940 (ATOM_Tonga_MCLK_Dependency_Table *)
2941 (((unsigned long)powerplay_table) +
2942 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2943
2944 /* The following fields are not initialized here: id orderedList allStatesList */
2945 power_state->classification.ui_label =
2946 (le16_to_cpu(state_entry->usClassification) &
2947 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2948 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2949 power_state->classification.flags = classification_flag;
2950 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
2951
2952 power_state->classification.temporary_state = false;
2953 power_state->classification.to_be_deleted = false;
2954
2955 power_state->validation.disallowOnDC =
2956 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
2957 ATOM_Tonga_DISALLOW_ON_DC));
2958
2959 power_state->pcie.lanes = 0;
2960
2961 power_state->display.disableFrameModulation = false;
2962 power_state->display.limitRefreshrate = false;
2963 power_state->display.enableVariBright =
2964 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
2965 ATOM_Tonga_ENABLE_VARIBRIGHT));
2966
2967 power_state->validation.supportedPowerLevels = 0;
2968 power_state->uvd_clocks.VCLK = 0;
2969 power_state->uvd_clocks.DCLK = 0;
2970 power_state->temperatures.min = 0;
2971 power_state->temperatures.max = 0;
2972
2973 performance_level = &(smu7_power_state->performance_levels
2974 [smu7_power_state->performance_level_count++]);
2975
2976 PP_ASSERT_WITH_CODE(
2977 			(smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
2978 "Performance levels exceeds SMC limit!",
2979 return -EINVAL);
2980
2981 PP_ASSERT_WITH_CODE(
2982 (smu7_power_state->performance_level_count <=
2983 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
2984 "Performance levels exceeds Driver limit!",
2985 return -EINVAL);
2986
2987 /* Performance levels are arranged from low to high. */
2988 performance_level->memory_clock = mclk_dep_table->entries
2989 [state_entry->ucMemoryClockIndexLow].ulMclk;
2990 if (sclk_dep_table->ucRevId == 0)
2991 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
2992 [state_entry->ucEngineClockIndexLow].ulSclk;
2993 else if (sclk_dep_table->ucRevId == 1)
2994 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
2995 [state_entry->ucEngineClockIndexLow].ulSclk;
2996 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
2997 state_entry->ucPCIEGenLow);
2998 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
2999 state_entry->ucPCIELaneHigh);
3000
3001 performance_level = &(smu7_power_state->performance_levels
3002 [smu7_power_state->performance_level_count++]);
3003 performance_level->memory_clock = mclk_dep_table->entries
3004 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3005
3006 if (sclk_dep_table->ucRevId == 0)
3007 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3008 [state_entry->ucEngineClockIndexHigh].ulSclk;
3009 else if (sclk_dep_table->ucRevId == 1)
3010 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3011 [state_entry->ucEngineClockIndexHigh].ulSclk;
3012
3013 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3014 state_entry->ucPCIEGenHigh);
3015 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3016 state_entry->ucPCIELaneHigh);
3017
3018 return 0;
3019}
3020
3021static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
3022 unsigned long entry_index, struct pp_power_state *state)
3023{
3024 int result;
3025 struct smu7_power_state *ps;
3026 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3027 struct phm_ppt_v1_information *table_info =
3028 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3029 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
3030 table_info->vdd_dep_on_mclk;
3031
3032 state->hardware.magic = PHM_VIslands_Magic;
3033
3034 ps = (struct smu7_power_state *)(&state->hardware);
3035
3036 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
3037 smu7_get_pp_table_entry_callback_func_v1);
3038
3039 	/* This is the earliest point at which we have both the dependency tables and the
3040 	 * VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS
3041 	 * boot state. If there is only one VDDCI/MCLK level, check that it matches the
3042 	 * VBIOS boot state. */
3043 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3044 if (dep_mclk_table->entries[0].clk !=
3045 data->vbios_boot_state.mclk_bootup_value)
3046 			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3047 "does not match VBIOS boot MCLK level");
3048 if (dep_mclk_table->entries[0].vddci !=
3049 data->vbios_boot_state.vddci_bootup_value)
3050 			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3051 "does not match VBIOS boot VDDCI level");
3052 }
3053
3054 /* set DC compatible flag if this state supports DC */
3055 if (!state->validation.disallowOnDC)
3056 ps->dc_compatible = true;
3057
3058 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3059 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3060
3061 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3062 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3063
3064 if (!result) {
3065 uint32_t i;
3066
3067 switch (state->classification.ui_label) {
3068 case PP_StateUILabel_Performance:
3069 data->use_pcie_performance_levels = true;
3070 for (i = 0; i < ps->performance_level_count; i++) {
3071 if (data->pcie_gen_performance.max <
3072 ps->performance_levels[i].pcie_gen)
3073 data->pcie_gen_performance.max =
3074 ps->performance_levels[i].pcie_gen;
3075
3076 if (data->pcie_gen_performance.min >
3077 ps->performance_levels[i].pcie_gen)
3078 data->pcie_gen_performance.min =
3079 ps->performance_levels[i].pcie_gen;
3080
3081 if (data->pcie_lane_performance.max <
3082 ps->performance_levels[i].pcie_lane)
3083 data->pcie_lane_performance.max =
3084 ps->performance_levels[i].pcie_lane;
3085 if (data->pcie_lane_performance.min >
3086 ps->performance_levels[i].pcie_lane)
3087 data->pcie_lane_performance.min =
3088 ps->performance_levels[i].pcie_lane;
3089 }
3090 break;
3091 case PP_StateUILabel_Battery:
3092 data->use_pcie_power_saving_levels = true;
3093
3094 for (i = 0; i < ps->performance_level_count; i++) {
3095 if (data->pcie_gen_power_saving.max <
3096 ps->performance_levels[i].pcie_gen)
3097 data->pcie_gen_power_saving.max =
3098 ps->performance_levels[i].pcie_gen;
3099
3100 if (data->pcie_gen_power_saving.min >
3101 ps->performance_levels[i].pcie_gen)
3102 data->pcie_gen_power_saving.min =
3103 ps->performance_levels[i].pcie_gen;
3104
3105 if (data->pcie_lane_power_saving.max <
3106 ps->performance_levels[i].pcie_lane)
3107 data->pcie_lane_power_saving.max =
3108 ps->performance_levels[i].pcie_lane;
3109
3110 if (data->pcie_lane_power_saving.min >
3111 ps->performance_levels[i].pcie_lane)
3112 data->pcie_lane_power_saving.min =
3113 ps->performance_levels[i].pcie_lane;
3114 }
3115 break;
3116 default:
3117 break;
3118 }
3119 }
3120 return 0;
3121}
3122
3123static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3124 struct pp_hw_power_state *power_state,
3125 unsigned int index, const void *clock_info)
3126{
3127 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3128 struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
3129 const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
3130 struct smu7_performance_level *performance_level;
3131 uint32_t engine_clock, memory_clock;
3132 uint16_t pcie_gen_from_bios;
3133
3134 engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
3135 memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
3136
3137 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3138 data->highest_mclk = memory_clock;
3139
599a7e9f 3140 PP_ASSERT_WITH_CODE(
d3f8c0ab 3141 (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
599a7e9f
RZ
3142 "Performance levels exceeds SMC limit!",
3143 return -EINVAL);
3144
3145 PP_ASSERT_WITH_CODE(
da7800a8 3146 (ps->performance_level_count <
599a7e9f 3147 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
da7800a8
RZ
3148 "Performance levels exceeds Driver limit, Skip!",
3149 return 0);
3150
3151 performance_level = &(ps->performance_levels
3152 [ps->performance_level_count++]);
599a7e9f
RZ
3153
3154 /* Performance levels are arranged from low to high. */
3155 performance_level->memory_clock = memory_clock;
3156 performance_level->engine_clock = engine_clock;
3157
3158 pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3159
3160 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3161 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3162
3163 return 0;
3164}
3165
3166static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3167 unsigned long entry_index, struct pp_power_state *state)
3168{
3169 int result;
3170 struct smu7_power_state *ps;
3171 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3172 struct phm_clock_voltage_dependency_table *dep_mclk_table =
3173 hwmgr->dyn_state.vddci_dependency_on_mclk;
3174
3175 memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3176
3177 state->hardware.magic = PHM_VIslands_Magic;
3178
3179 ps = (struct smu7_power_state *)(&state->hardware);
3180
3181 result = pp_tables_get_entry(hwmgr, entry_index, state,
3182 smu7_get_pp_table_entry_callback_func_v0);
3183
3184 /*
3185 * This is the earliest time we have all the dependency table
3186 * and the VBIOS boot state as
3187 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
3188 * state if there is only one VDDCI/MCLK level, check if it's
3189 * the same as VBIOS boot state
3190 */
3191 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3192 if (dep_mclk_table->entries[0].clk !=
3193 data->vbios_boot_state.mclk_bootup_value)
89c67699 3194 pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
599a7e9f
RZ
3195 "does not match VBIOS boot MCLK level");
3196 if (dep_mclk_table->entries[0].v !=
3197 data->vbios_boot_state.vddci_bootup_value)
89c67699 3198 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
599a7e9f
RZ
3199 "does not match VBIOS boot VDDCI level");
3200 }
3201
3202 /* set DC compatible flag if this state supports DC */
3203 if (!state->validation.disallowOnDC)
3204 ps->dc_compatible = true;
3205
3206 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3207 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3208
3209 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3210 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3211
3212 if (!result) {
3213 uint32_t i;
3214
3215 switch (state->classification.ui_label) {
3216 case PP_StateUILabel_Performance:
3217 data->use_pcie_performance_levels = true;
3218
3219 for (i = 0; i < ps->performance_level_count; i++) {
3220 if (data->pcie_gen_performance.max <
3221 ps->performance_levels[i].pcie_gen)
3222 data->pcie_gen_performance.max =
3223 ps->performance_levels[i].pcie_gen;
3224
3225 if (data->pcie_gen_performance.min >
3226 ps->performance_levels[i].pcie_gen)
3227 data->pcie_gen_performance.min =
3228 ps->performance_levels[i].pcie_gen;
3229
3230 if (data->pcie_lane_performance.max <
3231 ps->performance_levels[i].pcie_lane)
3232 data->pcie_lane_performance.max =
3233 ps->performance_levels[i].pcie_lane;
3234
3235 if (data->pcie_lane_performance.min >
3236 ps->performance_levels[i].pcie_lane)
3237 data->pcie_lane_performance.min =
3238 ps->performance_levels[i].pcie_lane;
3239 }
3240 break;
3241 case PP_StateUILabel_Battery:
3242 data->use_pcie_power_saving_levels = true;
3243
3244 for (i = 0; i < ps->performance_level_count; i++) {
3245 if (data->pcie_gen_power_saving.max <
3246 ps->performance_levels[i].pcie_gen)
3247 data->pcie_gen_power_saving.max =
3248 ps->performance_levels[i].pcie_gen;
3249
3250 if (data->pcie_gen_power_saving.min >
3251 ps->performance_levels[i].pcie_gen)
3252 data->pcie_gen_power_saving.min =
3253 ps->performance_levels[i].pcie_gen;
3254
3255 if (data->pcie_lane_power_saving.max <
3256 ps->performance_levels[i].pcie_lane)
3257 data->pcie_lane_power_saving.max =
3258 ps->performance_levels[i].pcie_lane;
3259
3260 if (data->pcie_lane_power_saving.min >
3261 ps->performance_levels[i].pcie_lane)
3262 data->pcie_lane_power_saving.min =
3263 ps->performance_levels[i].pcie_lane;
3264 }
3265 break;
3266 default:
3267 break;
3268 }
3269 }
3270 return 0;
3271}
3272
3273static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3274 unsigned long entry_index, struct pp_power_state *state)
3275{
3276 if (hwmgr->pp_table_version == PP_TABLE_V0)
3277 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3278 else if (hwmgr->pp_table_version == PP_TABLE_V1)
3279 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3280
3281 return 0;
3282}
3283
2245b60f
EH
3284static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
3285 struct pp_gpu_power *query)
3286{
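	/* Sample the SMC power telemetry: start logging, wait ~20 ms for a
	 * sampling window, then read the accumulated results back from the
	 * SMU_PM_STATUS scratch registers below.
	 */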
d3f8c0ab 3287 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
2245b60f
EH
3288 PPSMC_MSG_PmStatusLogStart),
3289 "Failed to start pm status log!",
3290			return -EINVAL);
3291
a7c7bc4c 3292 msleep_interruptible(20);
2245b60f 3293
d3f8c0ab 3294 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
2245b60f
EH
3295 PPSMC_MSG_PmStatusLogSample),
3296 "Failed to sample pm status log!",
3297			return -EINVAL);
3298
3299 query->vddc_power = cgs_read_ind_register(hwmgr->device,
3300 CGS_IND_REG__SMC,
3301 ixSMU_PM_STATUS_40);
3302 query->vddci_power = cgs_read_ind_register(hwmgr->device,
3303 CGS_IND_REG__SMC,
3304 ixSMU_PM_STATUS_49);
3305 query->max_gpu_power = cgs_read_ind_register(hwmgr->device,
3306 CGS_IND_REG__SMC,
3307 ixSMU_PM_STATUS_94);
3308 query->average_gpu_power = cgs_read_ind_register(hwmgr->device,
3309 CGS_IND_REG__SMC,
3310 ixSMU_PM_STATUS_95);
3311
3312 return 0;
3313}
3314
9f8df7d7
TSD
3315static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3316 void *value, int *size)
a6e36952
TSD
3317{
3318 uint32_t sclk, mclk, activity_percent;
3319 uint32_t offset;
3320 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3321
9f8df7d7
TSD
3322 /* size must be at least 4 bytes for all sensors */
3323 if (*size < 4)
3324 return -EINVAL;
3325
a6e36952
TSD
3326 switch (idx) {
3327 case AMDGPU_PP_SENSOR_GFX_SCLK:
d3f8c0ab 3328 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
a6e36952 3329 sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
cd7b0c66 3330 *((uint32_t *)value) = sclk;
9f8df7d7 3331 *size = 4;
a6e36952
TSD
3332 return 0;
3333 case AMDGPU_PP_SENSOR_GFX_MCLK:
d3f8c0ab 3334 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
a6e36952 3335 mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
cd7b0c66 3336 *((uint32_t *)value) = mclk;
9f8df7d7 3337 *size = 4;
a6e36952
TSD
3338 return 0;
3339 case AMDGPU_PP_SENSOR_GPU_LOAD:
d3f8c0ab 3340 offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
a6e36952
TSD
3341 SMU_SoftRegisters,
3342 AverageGraphicsActivity);
3343
3344 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
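		/* The value read looks like an 8.8 fixed-point fraction: adding
		 * 0x80 (i.e. 0.5) and shifting by 8 rounds it to the nearest
		 * integer percentage.
		 */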
3345 activity_percent += 0x80;
3346 activity_percent >>= 8;
cd7b0c66 3347 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
9f8df7d7 3348 *size = 4;
a6e36952
TSD
3349 return 0;
3350 case AMDGPU_PP_SENSOR_GPU_TEMP:
cd7b0c66 3351 *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
9f8df7d7 3352 *size = 4;
a6e36952 3353 return 0;
3de4ec57 3354 case AMDGPU_PP_SENSOR_UVD_POWER:
cd7b0c66 3355 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
9f8df7d7 3356 *size = 4;
3de4ec57
TSD
3357 return 0;
3358 case AMDGPU_PP_SENSOR_VCE_POWER:
cd7b0c66 3359 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
9f8df7d7 3360 *size = 4;
3de4ec57 3361 return 0;
2245b60f 3362 case AMDGPU_PP_SENSOR_GPU_POWER:
9f8df7d7
TSD
3363 if (*size < sizeof(struct pp_gpu_power))
3364 return -EINVAL;
3365 *size = sizeof(struct pp_gpu_power);
2245b60f 3366 return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
a6e36952
TSD
3367 default:
3368 return -EINVAL;
3369 }
3370}
3371
599a7e9f
RZ
3372static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3373{
3374 const struct phm_set_power_state_input *states =
3375 (const struct phm_set_power_state_input *)input;
3376 const struct smu7_power_state *smu7_ps =
3377 cast_const_phw_smu7_power_state(states->pnew_state);
3378 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3379 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
3380 uint32_t sclk = smu7_ps->performance_levels
3381 [smu7_ps->performance_level_count - 1].engine_clock;
3382 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
3383 uint32_t mclk = smu7_ps->performance_levels
3384 [smu7_ps->performance_level_count - 1].memory_clock;
3385 struct PP_Clocks min_clocks = {0};
3386 uint32_t i;
3387 struct cgs_display_info info = {0};
3388
3389 data->need_update_smu7_dpm_table = 0;
3390
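	/* Flag which DPM tables need rework: OD_UPDATE_* when the requested top
	 * clock is not an existing DPM level, UPDATE_* when only the deep-sleep
	 * divider or the display count has changed.
	 */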
3391 for (i = 0; i < sclk_table->count; i++) {
3392 if (sclk == sclk_table->dpm_levels[i].value)
3393 break;
3394 }
3395
3396 if (i >= sclk_table->count)
3397 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3398 else {
3399 /* TODO: Check SCLK in DAL's minimum clocks
3400 * in case DeepSleep divider update is required.
3401 */
3402 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
3403 (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
3404 data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
3405 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3406 }
3407
3408 for (i = 0; i < mclk_table->count; i++) {
3409 if (mclk == mclk_table->dpm_levels[i].value)
3410 break;
3411 }
3412
3413 if (i >= mclk_table->count)
3414 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3415
3416 cgs_get_active_displays_info(hwmgr->device, &info);
3417
3418 if (data->display_timing.num_existing_displays != info.display_count)
3419 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3420
3421 return 0;
3422}
3423
3424static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3425 const struct smu7_power_state *smu7_ps)
3426{
3427 uint32_t i;
3428 uint32_t sclk, max_sclk = 0;
3429 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3430 struct smu7_dpm_table *dpm_table = &data->dpm_table;
3431
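	/* Return the PCIe speed paired with the state's highest SCLK DPM level,
	 * capped at the top entry of the PCIe speed table.
	 */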
3432 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3433 sclk = smu7_ps->performance_levels[i].engine_clock;
3434 if (max_sclk < sclk)
3435 max_sclk = sclk;
3436 }
3437
3438 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3439 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3440 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3441 dpm_table->pcie_speed_table.dpm_levels
3442 [dpm_table->pcie_speed_table.count - 1].value :
3443 dpm_table->pcie_speed_table.dpm_levels[i].value);
3444 }
3445
3446 return 0;
3447}
3448
3449static int smu7_request_link_speed_change_before_state_change(
3450 struct pp_hwmgr *hwmgr, const void *input)
3451{
3452 const struct phm_set_power_state_input *states =
3453 (const struct phm_set_power_state_input *)input;
3454 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3455 const struct smu7_power_state *smu7_nps =
3456 cast_const_phw_smu7_power_state(states->pnew_state);
3457 const struct smu7_power_state *polaris10_cps =
3458 cast_const_phw_smu7_power_state(states->pcurrent_state);
3459
3460 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
3461 uint16_t current_link_speed;
3462
3463 if (data->force_pcie_gen == PP_PCIEGenInvalid)
3464 current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
3465 else
3466 current_link_speed = data->force_pcie_gen;
3467
3468 data->force_pcie_gen = PP_PCIEGenInvalid;
3469 data->pspp_notify_required = false;
3470
3471 if (target_link_speed > current_link_speed) {
3472 switch (target_link_speed) {
3473 case PP_PCIEGen3:
3474 if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
3475 break;
3476 data->force_pcie_gen = PP_PCIEGen2;
3477 if (current_link_speed == PP_PCIEGen2)
3478 break;
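			/* fall through - the Gen3 request failed, try requesting Gen2 instead */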
3479 case PP_PCIEGen2:
3480 if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
3481 break;
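			/* fall through - the Gen2 request failed as well, keep the current speed */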
3482 default:
3483 data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
3484 break;
3485 }
3486 } else {
3487 if (target_link_speed < current_link_speed)
3488 data->pspp_notify_required = true;
3489 }
3490
3491 return 0;
3492}
3493
3494static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3495{
3496 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3497
3498 if (0 == data->need_update_smu7_dpm_table)
3499 return 0;
3500
3501 if ((0 == data->sclk_dpm_key_disabled) &&
3502 (data->need_update_smu7_dpm_table &
3503 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3504 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3505 "Trying to freeze SCLK DPM when DPM is disabled",
3506 );
d3f8c0ab 3507 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
599a7e9f
RZ
3508 PPSMC_MSG_SCLKDPM_FreezeLevel),
3509 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
3510 return -EINVAL);
3511 }
3512
3513 if ((0 == data->mclk_dpm_key_disabled) &&
3514 (data->need_update_smu7_dpm_table &
3515 DPMTABLE_OD_UPDATE_MCLK)) {
3516 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3517 "Trying to freeze MCLK DPM when DPM is disabled",
3518 );
d3f8c0ab 3519 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
599a7e9f
RZ
3520 PPSMC_MSG_MCLKDPM_FreezeLevel),
3521 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
3522 return -EINVAL);
3523 }
3524
3525 return 0;
3526}
3527
3528static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
3529 struct pp_hwmgr *hwmgr, const void *input)
3530{
3531 int result = 0;
3532 const struct phm_set_power_state_input *states =
3533 (const struct phm_set_power_state_input *)input;
3534 const struct smu7_power_state *smu7_ps =
3535 cast_const_phw_smu7_power_state(states->pnew_state);
3536 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3537 uint32_t sclk = smu7_ps->performance_levels
3538 [smu7_ps->performance_level_count - 1].engine_clock;
3539 uint32_t mclk = smu7_ps->performance_levels
3540 [smu7_ps->performance_level_count - 1].memory_clock;
3541 struct smu7_dpm_table *dpm_table = &data->dpm_table;
3542
3543 struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
3544 uint32_t dpm_count, clock_percent;
3545 uint32_t i;
3546
3547 if (0 == data->need_update_smu7_dpm_table)
3548 return 0;
3549
3550 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3551 dpm_table->sclk_table.dpm_levels
3552 [dpm_table->sclk_table.count - 1].value = sclk;
3553
3554 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
3555 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
3556 /* Need to do calculation based on the golden DPM table
3557 * as the Heatmap GPU Clock axis is also based on the default values
3558 */
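			/* Example: if the requested top SCLK ends up 10% above the
			 * golden top level, the intermediate levels are likewise
			 * raised by 10% of their golden values (and lowered the
			 * same way when the new top clock is below the golden one).
			 */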
3559 PP_ASSERT_WITH_CODE(
3560 (golden_dpm_table->sclk_table.dpm_levels
3561 [golden_dpm_table->sclk_table.count - 1].value != 0),
3562 "Divide by 0!",
3563 return -EINVAL);
3564 dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
3565
3566 for (i = dpm_count; i > 1; i--) {
3567 if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
3568 clock_percent =
3569 ((sclk
3570 - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
3571 ) * 100)
3572 / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
3573
3574 dpm_table->sclk_table.dpm_levels[i].value =
3575 golden_dpm_table->sclk_table.dpm_levels[i].value +
3576 (golden_dpm_table->sclk_table.dpm_levels[i].value *
3577 clock_percent)/100;
3578
3579 } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
3580 clock_percent =
3581 ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
3582 - sclk) * 100)
3583 / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
3584
3585 dpm_table->sclk_table.dpm_levels[i].value =
3586 golden_dpm_table->sclk_table.dpm_levels[i].value -
3587 (golden_dpm_table->sclk_table.dpm_levels[i].value *
3588 clock_percent) / 100;
3589 } else
3590 dpm_table->sclk_table.dpm_levels[i].value =
3591 golden_dpm_table->sclk_table.dpm_levels[i].value;
3592 }
3593 }
3594 }
3595
3596 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3597 dpm_table->mclk_table.dpm_levels
3598 [dpm_table->mclk_table.count - 1].value = mclk;
3599
3600 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
3601 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
3602
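			/* Scale the intermediate MCLK levels from the golden table,
			 * mirroring the SCLK handling above.
			 */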
3603 PP_ASSERT_WITH_CODE(
3604 (golden_dpm_table->mclk_table.dpm_levels
3605 [golden_dpm_table->mclk_table.count-1].value != 0),
3606 "Divide by 0!",
3607 return -EINVAL);
3608 dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
3609 for (i = dpm_count; i > 1; i--) {
3610 if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
3611 clock_percent = ((mclk -
3612 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
3613 / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
3614
3615 dpm_table->mclk_table.dpm_levels[i].value =
3616 golden_dpm_table->mclk_table.dpm_levels[i].value +
3617 (golden_dpm_table->mclk_table.dpm_levels[i].value *
3618 clock_percent) / 100;
3619
3620 } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
3621 clock_percent = (
3622 (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
3623 * 100)
3624 / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
3625
3626 dpm_table->mclk_table.dpm_levels[i].value =
3627 golden_dpm_table->mclk_table.dpm_levels[i].value -
3628 (golden_dpm_table->mclk_table.dpm_levels[i].value *
3629 clock_percent) / 100;
3630 } else
3631 dpm_table->mclk_table.dpm_levels[i].value =
3632 golden_dpm_table->mclk_table.dpm_levels[i].value;
3633 }
3634 }
3635 }
3636
3637 if (data->need_update_smu7_dpm_table &
3638 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
3639 result = smum_populate_all_graphic_levels(hwmgr);
3640 PP_ASSERT_WITH_CODE((0 == result),
3641 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3642 return result);
3643 }
3644
3645 if (data->need_update_smu7_dpm_table &
3646 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3647		/* populate MCLK DPM table to SMU7 */
3648 result = smum_populate_all_memory_levels(hwmgr);
3649 PP_ASSERT_WITH_CODE((0 == result),
3650 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3651 return result);
3652 }
3653
3654 return result;
3655}
3656
3657static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3658 struct smu7_single_dpm_table *dpm_table,
3659 uint32_t low_limit, uint32_t high_limit)
3660{
3661 uint32_t i;
3662
3663 for (i = 0; i < dpm_table->count; i++) {
3664 if ((dpm_table->dpm_levels[i].value < low_limit)
3665 || (dpm_table->dpm_levels[i].value > high_limit))
3666 dpm_table->dpm_levels[i].enabled = false;
3667 else
3668 dpm_table->dpm_levels[i].enabled = true;
3669 }
3670
3671 return 0;
3672}
3673
3674static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
3675 const struct smu7_power_state *smu7_ps)
3676{
3677 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3678 uint32_t high_limit_count;
3679
3680 PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
3681 "power state did not have any performance level",
3682 return -EINVAL);
3683
3684 high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
3685
3686 smu7_trim_single_dpm_states(hwmgr,
3687 &(data->dpm_table.sclk_table),
3688 smu7_ps->performance_levels[0].engine_clock,
3689 smu7_ps->performance_levels[high_limit_count].engine_clock);
3690
3691 smu7_trim_single_dpm_states(hwmgr,
3692 &(data->dpm_table.mclk_table),
3693 smu7_ps->performance_levels[0].memory_clock,
3694 smu7_ps->performance_levels[high_limit_count].memory_clock);
3695
3696 return 0;
3697}
3698
3699static int smu7_generate_dpm_level_enable_mask(
3700 struct pp_hwmgr *hwmgr, const void *input)
3701{
3702 int result;
3703 const struct phm_set_power_state_input *states =
3704 (const struct phm_set_power_state_input *)input;
3705 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3706 const struct smu7_power_state *smu7_ps =
3707 cast_const_phw_smu7_power_state(states->pnew_state);
3708
3709 result = smu7_trim_dpm_states(hwmgr, smu7_ps);
3710 if (result)
3711 return result;
3712
3713 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
3714 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
3715 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
3716 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
3717 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
3718 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
3719
3720 return 0;
3721}
3722
3723static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3724{
3725 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3726
3727 if (0 == data->need_update_smu7_dpm_table)
3728 return 0;
3729
3730 if ((0 == data->sclk_dpm_key_disabled) &&
3731 (data->need_update_smu7_dpm_table &
3732 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3733
3734 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3735 "Trying to Unfreeze SCLK DPM when DPM is disabled",
3736 );
d3f8c0ab 3737 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
599a7e9f
RZ
3738 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
3739 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
3740 return -EINVAL);
3741 }
3742
3743 if ((0 == data->mclk_dpm_key_disabled) &&
3744 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
3745
3746 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3747 "Trying to Unfreeze MCLK DPM when DPM is disabled",
3748 );
d3f8c0ab 3749 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
fd78e6af 3750 PPSMC_MSG_MCLKDPM_UnfreezeLevel),
599a7e9f
RZ
3751 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
3752 return -EINVAL);
3753 }
3754
3755 data->need_update_smu7_dpm_table = 0;
3756
3757 return 0;
3758}
3759
3760static int smu7_notify_link_speed_change_after_state_change(
3761 struct pp_hwmgr *hwmgr, const void *input)
3762{
3763 const struct phm_set_power_state_input *states =
3764 (const struct phm_set_power_state_input *)input;
3765 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3766 const struct smu7_power_state *smu7_ps =
3767 cast_const_phw_smu7_power_state(states->pnew_state);
3768 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
3769 uint8_t request;
3770
3771 if (data->pspp_notify_required) {
3772 if (target_link_speed == PP_PCIEGen3)
3773 request = PCIE_PERF_REQ_GEN3;
3774 else if (target_link_speed == PP_PCIEGen2)
3775 request = PCIE_PERF_REQ_GEN2;
3776 else
3777 request = PCIE_PERF_REQ_GEN1;
3778
3779 if (request == PCIE_PERF_REQ_GEN1 &&
3780 smu7_get_current_pcie_speed(hwmgr) > 0)
3781 return 0;
3782
3783 if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
3784 if (PP_PCIEGen2 == target_link_speed)
b5c11b8e 3785 pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
599a7e9f 3786 else
b5c11b8e 3787 pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
599a7e9f
RZ
3788 }
3789 }
3790
3791 return 0;
3792}
3793
3794static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
3795{
3796 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3797
1756f1bb 3798 if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
d3f8c0ab 3799 smum_send_msg_to_smc_with_parameter(hwmgr,
599a7e9f 3800 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
1756f1bb 3801 return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
599a7e9f
RZ
3802}
3803
3804static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
3805{
3806 int tmp_result, result = 0;
3807 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3808
3809 tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3810 PP_ASSERT_WITH_CODE((0 == tmp_result),
3811 "Failed to find DPM states clocks in DPM table!",
3812 result = tmp_result);
3813
3814 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3815 PHM_PlatformCaps_PCIEPerformanceRequest)) {
3816 tmp_result =
3817 smu7_request_link_speed_change_before_state_change(hwmgr, input);
3818 PP_ASSERT_WITH_CODE((0 == tmp_result),
3819 "Failed to request link speed change before state change!",
3820 result = tmp_result);
3821 }
3822
3823 tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
3824 PP_ASSERT_WITH_CODE((0 == tmp_result),
3825 "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
3826
3827 tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3828 PP_ASSERT_WITH_CODE((0 == tmp_result),
3829 "Failed to populate and upload SCLK MCLK DPM levels!",
3830 result = tmp_result);
3831
3832 tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
3833 PP_ASSERT_WITH_CODE((0 == tmp_result),
3834 "Failed to generate DPM level enabled mask!",
3835 result = tmp_result);
3836
3837 tmp_result = smum_update_sclk_threshold(hwmgr);
3838 PP_ASSERT_WITH_CODE((0 == tmp_result),
3839 "Failed to update SCLK threshold!",
3840 result = tmp_result);
3841
3842 tmp_result = smu7_notify_smc_display(hwmgr);
3843 PP_ASSERT_WITH_CODE((0 == tmp_result),
3844 "Failed to notify smc display settings!",
3845 result = tmp_result);
3846
3847 tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
3848 PP_ASSERT_WITH_CODE((0 == tmp_result),
3849 "Failed to unfreeze SCLK MCLK DPM!",
3850 result = tmp_result);
3851
3852 tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
3853 PP_ASSERT_WITH_CODE((0 == tmp_result),
3854 "Failed to upload DPM level enabled mask!",
3855 result = tmp_result);
3856
3857 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3858 PHM_PlatformCaps_PCIEPerformanceRequest)) {
3859 tmp_result =
3860 smu7_notify_link_speed_change_after_state_change(hwmgr, input);
3861 PP_ASSERT_WITH_CODE((0 == tmp_result),
3862 "Failed to notify link speed change after state change!",
3863 result = tmp_result);
3864 }
3865 data->apply_optimized_settings = false;
3866 return result;
3867}
3868
3869static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
3870{
3871 hwmgr->thermal_controller.
3872 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
3873
d3f8c0ab 3874 return smum_send_msg_to_smc_with_parameter(hwmgr,
599a7e9f
RZ
3875 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
3876}
3877
f8a4c11b
BX
3878static int
3879smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
599a7e9f
RZ
3880{
3881 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
3882
d3f8c0ab 3883	return (smum_send_msg_to_smc(hwmgr, msg) == 0) ?  0 : -EINVAL;
599a7e9f
RZ
3884}
3885
f8a4c11b
BX
3886static int
3887smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
599a7e9f
RZ
3888{
3889 uint32_t num_active_displays = 0;
3890 struct cgs_display_info info = {0};
3891
3892 info.mode_info = NULL;
3893 cgs_get_active_displays_info(hwmgr->device, &info);
3894
3895 num_active_displays = info.display_count;
3896
3897 if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true)
3898 smu7_notify_smc_display_change(hwmgr, false);
3899
3900 return 0;
3901}
3902
3903/**
3904* Programs the display gap
3905*
3906* @param hwmgr the address of the powerplay hardware manager.
3907* @return always OK
3908*/
f8a4c11b 3909static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
599a7e9f
RZ
3910{
3911 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3912 uint32_t num_active_displays = 0;
3913 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
3914 uint32_t display_gap2;
3915 uint32_t pre_vbi_time_in_us;
3916 uint32_t frame_time_in_us;
3917 uint32_t ref_clock;
3918 uint32_t refresh_rate = 0;
3919 struct cgs_display_info info = {0};
8b95f4f7 3920 struct cgs_mode_info mode_info = {0};
599a7e9f
RZ
3921
3922 info.mode_info = &mode_info;
599a7e9f
RZ
3923 cgs_get_active_displays_info(hwmgr->device, &info);
3924 num_active_displays = info.display_count;
3925
3926 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
3927 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
3928
3929 ref_clock = mode_info.ref_clock;
3930 refresh_rate = mode_info.refresh_rate;
3931
3932 if (0 == refresh_rate)
3933 refresh_rate = 60;
3934
3935 frame_time_in_us = 1000000 / refresh_rate;
3936
3937 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
8b95f4f7 3938
599a7e9f
RZ
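	/* frame_time_x2 is twice the frame time expressed in 100 us units
	 * (e.g. 333 for a 60 Hz display); it is later handed to the SMC as the
	 * VBI timeout in smu7_notify_smc_display().
	 */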
3939 data->frame_time_x2 = frame_time_in_us * 2 / 100;
3940
3941 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
3942
3943 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
3944
3945 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
d3f8c0ab 3946 data->soft_regs_start + smum_get_offsetof(hwmgr,
599a7e9f
RZ
3947 SMU_SoftRegisters,
3948 PreVBlankGap), 0x64);
3949
3950 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
d3f8c0ab 3951 data->soft_regs_start + smum_get_offsetof(hwmgr,
599a7e9f
RZ
3952 SMU_SoftRegisters,
3953 VBlankTimeout),
3954 (frame_time_in_us - pre_vbi_time_in_us));
3955
3956 return 0;
3957}
3958
f8a4c11b 3959static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
599a7e9f
RZ
3960{
3961 return smu7_program_display_gap(hwmgr);
3962}
3963
3964/**
3965* Set maximum target operating fan output RPM
3966*
3967* @param hwmgr: the address of the powerplay hardware manager.
3968* @param us_max_fan_rpm: max operating fan RPM value.
3969* @return The response that came from the SMC.
3970*/
3971static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
3972{
3973 hwmgr->thermal_controller.
3974 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
3975
d3f8c0ab 3976 return smum_send_msg_to_smc_with_parameter(hwmgr,
599a7e9f
RZ
3977 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
3978}
3979
f8a4c11b 3980static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
599a7e9f
RZ
3981 const void *thermal_interrupt_info)
3982{
3983 return 0;
3984}
3985
f8a4c11b
BX
3986static bool
3987smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
599a7e9f
RZ
3988{
3989 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3990 bool is_update_required = false;
3991 struct cgs_display_info info = {0, 0, NULL};
3992
3993 cgs_get_active_displays_info(hwmgr->device, &info);
3994
3995 if (data->display_timing.num_existing_displays != info.display_count)
3996 is_update_required = true;
3997
3998 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
3999 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
4000 (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4001 hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4002 is_update_required = true;
4003 }
4004 return is_update_required;
4005}
4006
4007static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4008 const struct smu7_performance_level *pl2)
4009{
4010 return ((pl1->memory_clock == pl2->memory_clock) &&
4011 (pl1->engine_clock == pl2->engine_clock) &&
4012 (pl1->pcie_gen == pl2->pcie_gen) &&
4013 (pl1->pcie_lane == pl2->pcie_lane));
4014}
4015
f8a4c11b
BX
4016static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4017 const struct pp_hw_power_state *pstate1,
4018 const struct pp_hw_power_state *pstate2, bool *equal)
599a7e9f 4019{
9faa6b02
RZ
4020 const struct smu7_power_state *psa;
4021 const struct smu7_power_state *psb;
599a7e9f
RZ
4022 int i;
4023
4024 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4025 return -EINVAL;
4026
9faa6b02
RZ
4027 psa = cast_const_phw_smu7_power_state(pstate1);
4028 psb = cast_const_phw_smu7_power_state(pstate2);
599a7e9f
RZ
4029 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4030 if (psa->performance_level_count != psb->performance_level_count) {
4031 *equal = false;
4032 return 0;
4033 }
4034
4035 for (i = 0; i < psa->performance_level_count; i++) {
4036 if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4037 /* If we have found even one performance level pair that is different the states are different. */
4038 *equal = false;
4039 return 0;
4040 }
4041 }
4042
4043 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4044 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4045 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4046 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4047
4048 return 0;
4049}
4050
f8a4c11b 4051static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
599a7e9f
RZ
4052{
4053 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4054
4055 uint32_t vbios_version;
4056 uint32_t tmp;
4057
4058 /* Read MC indirect register offset 0x9F bits [3:0] to see
4059 * if VBIOS has already loaded a full version of MC ucode
4060 * or not.
4061 */
4062
4063 smu7_get_mc_microcode_version(hwmgr);
4064 vbios_version = hwmgr->microcode_version_info.MC & 0xf;
4065
4066 data->need_long_memory_training = false;
4067
4068 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4069 ixMC_IO_DEBUG_UP_13);
4070 tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4071
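	/* Bit 23 of MC_IO_DEBUG_UP_13 selects between the nominal memory
	 * latency values and the larger 330 fallback values consumed later by
	 * smu7_get_mem_latency().
	 */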
4072 if (tmp & (1 << 23)) {
4073 data->mem_latency_high = MEM_LATENCY_HIGH;
4074 data->mem_latency_low = MEM_LATENCY_LOW;
4075 } else {
4076 data->mem_latency_high = 330;
4077 data->mem_latency_low = 330;
4078 }
4079
4080 return 0;
4081}
4082
4083static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
4084{
4085 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4086
4087 data->clock_registers.vCG_SPLL_FUNC_CNTL =
4088 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
4089 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
4090 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
4091 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
4092 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
4093 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
4094 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
4095 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
4096 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
4097 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
4098 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
4099 data->clock_registers.vDLL_CNTL =
4100 cgs_read_register(hwmgr->device, mmDLL_CNTL);
4101 data->clock_registers.vMCLK_PWRMGT_CNTL =
4102 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
4103 data->clock_registers.vMPLL_AD_FUNC_CNTL =
4104 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
4105 data->clock_registers.vMPLL_DQ_FUNC_CNTL =
4106 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
4107 data->clock_registers.vMPLL_FUNC_CNTL =
4108 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
4109 data->clock_registers.vMPLL_FUNC_CNTL_1 =
4110 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
4111 data->clock_registers.vMPLL_FUNC_CNTL_2 =
4112 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
4113 data->clock_registers.vMPLL_SS1 =
4114 cgs_read_register(hwmgr->device, mmMPLL_SS1);
4115 data->clock_registers.vMPLL_SS2 =
4116 cgs_read_register(hwmgr->device, mmMPLL_SS2);
4117 return 0;
4118
4119}
4120
4121/**
4122 * Find out if memory is GDDR5.
4123 *
4124 * @param hwmgr the address of the powerplay hardware manager.
4125 * @return always 0
4126 */
4127static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4128{
4129 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4130 uint32_t temp;
4131
4132 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
4133
4134 data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
4135 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
4136 MC_SEQ_MISC0_GDDR5_SHIFT));
4137
4138 return 0;
4139}
4140
4141/**
4142 * Enables Dynamic Power Management by SMC
4143 *
4144 * @param hwmgr the address of the powerplay hardware manager.
4145 * @return always 0
4146 */
4147static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
4148{
4149 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4150 GENERAL_PWRMGT, STATIC_PM_EN, 1);
4151
4152 return 0;
4153}
4154
4155/**
4156 * Initialize PowerGating States for different engines
4157 *
4158 * @param hwmgr the address of the powerplay hardware manager.
4159 * @return always 0
4160 */
4161static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4162{
4163 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4164
4165 data->uvd_power_gated = false;
4166 data->vce_power_gated = false;
4167 data->samu_power_gated = false;
4168
4169 return 0;
4170}
4171
4172static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4173{
4174 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4175
4176 data->low_sclk_interrupt_threshold = 0;
4177 return 0;
4178}
4179
f8a4c11b 4180static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
599a7e9f
RZ
4181{
4182 int tmp_result, result = 0;
4183
4184 smu7_upload_mc_firmware(hwmgr);
4185
4186 tmp_result = smu7_read_clock_registers(hwmgr);
4187 PP_ASSERT_WITH_CODE((0 == tmp_result),
4188 "Failed to read clock registers!", result = tmp_result);
4189
4190 tmp_result = smu7_get_memory_type(hwmgr);
4191 PP_ASSERT_WITH_CODE((0 == tmp_result),
4192 "Failed to get memory type!", result = tmp_result);
4193
4194 tmp_result = smu7_enable_acpi_power_management(hwmgr);
4195 PP_ASSERT_WITH_CODE((0 == tmp_result),
4196 "Failed to enable ACPI power management!", result = tmp_result);
4197
4198 tmp_result = smu7_init_power_gate_state(hwmgr);
4199 PP_ASSERT_WITH_CODE((0 == tmp_result),
4200 "Failed to init power gate state!", result = tmp_result);
4201
4202 tmp_result = smu7_get_mc_microcode_version(hwmgr);
4203 PP_ASSERT_WITH_CODE((0 == tmp_result),
4204 "Failed to get MC microcode version!", result = tmp_result);
4205
4206 tmp_result = smu7_init_sclk_threshold(hwmgr);
4207 PP_ASSERT_WITH_CODE((0 == tmp_result),
4208 "Failed to init sclk threshold!", result = tmp_result);
4209
4210 return result;
4211}
4212
4213static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4214 enum pp_clock_type type, uint32_t mask)
4215{
4216 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4217
9947f704
RZ
4218 if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
4219 AMD_DPM_FORCED_LEVEL_LOW |
4220 AMD_DPM_FORCED_LEVEL_HIGH))
599a7e9f
RZ
4221 return -EINVAL;
4222
4223 switch (type) {
4224 case PP_SCLK:
4225 if (!data->sclk_dpm_key_disabled)
d3f8c0ab 4226 smum_send_msg_to_smc_with_parameter(hwmgr,
599a7e9f
RZ
4227 PPSMC_MSG_SCLKDPM_SetEnabledMask,
4228 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
4229 break;
4230 case PP_MCLK:
4231 if (!data->mclk_dpm_key_disabled)
d3f8c0ab 4232 smum_send_msg_to_smc_with_parameter(hwmgr,
599a7e9f
RZ
4233 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4234 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
4235 break;
4236 case PP_PCIE:
4237 {
4238 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4239 uint32_t level = 0;
4240
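		/* Force PCIe DPM to the highest level still set in the requested
		 * mask: the loop below computes the index of the mask's most
		 * significant set bit.
		 */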
4241 while (tmp >>= 1)
4242 level++;
4243
4244 if (!data->pcie_dpm_key_disabled)
d3f8c0ab 4245 smum_send_msg_to_smc_with_parameter(hwmgr,
599a7e9f
RZ
4246 PPSMC_MSG_PCIeDPM_ForceLevel,
4247 level);
4248 break;
4249 }
4250 default:
4251 break;
4252 }
4253
4254 return 0;
4255}
4256
4257static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
4258 enum pp_clock_type type, char *buf)
4259{
4260 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4261 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4262 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4263 struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
4264 int i, now, size = 0;
4265 uint32_t clock, pcie_speed;
4266
4267 switch (type) {
4268 case PP_SCLK:
d3f8c0ab 4269 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
599a7e9f
RZ
4270 clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
4271
4272 for (i = 0; i < sclk_table->count; i++) {
4273 if (clock > sclk_table->dpm_levels[i].value)
4274 continue;
4275 break;
4276 }
4277 now = i;
4278
4279 for (i = 0; i < sclk_table->count; i++)
4280 size += sprintf(buf + size, "%d: %uMhz %s\n",
4281 i, sclk_table->dpm_levels[i].value / 100,
4282 (i == now) ? "*" : "");
4283 break;
4284 case PP_MCLK:
d3f8c0ab 4285 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
599a7e9f
RZ
4286 clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
4287
4288 for (i = 0; i < mclk_table->count; i++) {
4289 if (clock > mclk_table->dpm_levels[i].value)
4290 continue;
4291 break;
4292 }
4293 now = i;
4294
4295 for (i = 0; i < mclk_table->count; i++)
4296 size += sprintf(buf + size, "%d: %uMhz %s\n",
4297 i, mclk_table->dpm_levels[i].value / 100,
4298 (i == now) ? "*" : "");
4299 break;
4300 case PP_PCIE:
4301 pcie_speed = smu7_get_current_pcie_speed(hwmgr);
4302 for (i = 0; i < pcie_table->count; i++) {
4303 if (pcie_speed != pcie_table->dpm_levels[i].value)
4304 continue;
4305 break;
4306 }
4307 now = i;
4308
4309 for (i = 0; i < pcie_table->count; i++)
4310 size += sprintf(buf + size, "%d: %s %s\n", i,
7413d2fa
EQ
4311 (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
4312 (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
4313 (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
599a7e9f
RZ
4314 (i == now) ? "*" : "");
4315 break;
4316 default:
4317 break;
4318 }
4319 return size;
4320}
4321
f93f0c3a 4322static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
599a7e9f 4323{
2fde9ab2
RZ
4324 switch (mode) {
4325 case AMD_FAN_CTRL_NONE:
f93f0c3a 4326 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
2fde9ab2
RZ
4327 break;
4328 case AMD_FAN_CTRL_MANUAL:
4329 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4330 PHM_PlatformCaps_MicrocodeFanControl))
f93f0c3a 4331 smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
2fde9ab2
RZ
4332 break;
4333 case AMD_FAN_CTRL_AUTO:
f93f0c3a
RZ
4334 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
4335 smu7_fan_ctrl_start_smc_fan_control(hwmgr);
2fde9ab2
RZ
4336 break;
4337 default:
4338 break;
4339 }
599a7e9f
RZ
4340}
4341
f93f0c3a 4342static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
599a7e9f 4343{
2fde9ab2 4344 return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
599a7e9f
RZ
4345}
4346
4347static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4348{
4349 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4350 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4351 struct smu7_single_dpm_table *golden_sclk_table =
4352 &(data->golden_dpm_table.sclk_table);
4353 int value;
4354
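	/* Report the current overdrive as the percentage by which the top SCLK
	 * level exceeds the golden (default) top level.
	 */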
4355 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4356 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
4357 100 /
4358 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4359
4360 return value;
4361}
4362
4363static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4364{
4365 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4366 struct smu7_single_dpm_table *golden_sclk_table =
4367 &(data->golden_dpm_table.sclk_table);
4368 struct pp_power_state *ps;
4369 struct smu7_power_state *smu7_ps;
4370
4371 if (value > 20)
4372 value = 20;
4373
4374 ps = hwmgr->request_ps;
4375
4376 if (ps == NULL)
4377 return -EINVAL;
4378
4379 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4380
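	/* Apply the overdrive percentage to the highest performance level,
	 * e.g. value = 10 sets the top engine clock 10% above the golden top
	 * level (requests are clamped to 20% above).
	 */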
4381 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
4382 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
4383 value / 100 +
4384 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4385
4386 return 0;
4387}
4388
4389static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4390{
4391 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4392 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4393 struct smu7_single_dpm_table *golden_mclk_table =
4394 &(data->golden_dpm_table.mclk_table);
4395 int value;
4396
4397 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
4398 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
4399 100 /
4400 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4401
4402 return value;
4403}
4404
4405static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4406{
4407 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4408 struct smu7_single_dpm_table *golden_mclk_table =
4409 &(data->golden_dpm_table.mclk_table);
4410 struct pp_power_state *ps;
4411 struct smu7_power_state *smu7_ps;
4412
4413 if (value > 20)
4414 value = 20;
4415
4416 ps = hwmgr->request_ps;
4417
4418 if (ps == NULL)
4419 return -EINVAL;
4420
4421 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4422
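	/* Same overdrive scaling as smu7_set_sclk_od(), applied to the top
	 * memory clock level.
	 */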
4423 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
4424 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
4425 value / 100 +
4426 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4427
4428 return 0;
4429}
4430
4431
4432static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4433{
4434 struct phm_ppt_v1_information *table_info =
4435 (struct phm_ppt_v1_information *)hwmgr->pptable;
954e6bee
RZ
4436 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
4437 struct phm_clock_voltage_dependency_table *sclk_table;
599a7e9f
RZ
4438 int i;
4439
954e6bee
RZ
4440 if (hwmgr->pp_table_version == PP_TABLE_V1) {
4441 if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
4442 return -EINVAL;
4443 dep_sclk_table = table_info->vdd_dep_on_sclk;
4d8d44c6 4444 for (i = 0; i < dep_sclk_table->count; i++)
954e6bee 4445 clocks->clock[i] = dep_sclk_table->entries[i].clk;
4d8d44c6 4446 clocks->count = dep_sclk_table->count;
954e6bee
RZ
4447 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4448 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
4d8d44c6 4449 for (i = 0; i < sclk_table->count; i++)
954e6bee 4450 clocks->clock[i] = sclk_table->entries[i].clk;
4d8d44c6 4451 clocks->count = sclk_table->count;
599a7e9f 4452 }
954e6bee 4453
599a7e9f
RZ
4454 return 0;
4455}
4456
4457static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
4458{
4459 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4460
4461 if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
4462 return data->mem_latency_high;
4463 else if (clk >= MEM_FREQ_HIGH_LATENCY)
4464 return data->mem_latency_low;
4465 else
4466 return MEM_LATENCY_ERR;
4467}
4468
4469static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4470{
4471 struct phm_ppt_v1_information *table_info =
4472 (struct phm_ppt_v1_information *)hwmgr->pptable;
4473 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
4474 int i;
954e6bee 4475 struct phm_clock_voltage_dependency_table *mclk_table;
599a7e9f 4476
954e6bee
RZ
4477 if (hwmgr->pp_table_version == PP_TABLE_V1) {
4478 if (table_info == NULL)
4479 return -EINVAL;
4480 dep_mclk_table = table_info->vdd_dep_on_mclk;
4481 for (i = 0; i < dep_mclk_table->count; i++) {
4482 clocks->clock[i] = dep_mclk_table->entries[i].clk;
4483 clocks->latency[i] = smu7_get_mem_latency(hwmgr,
599a7e9f 4484 dep_mclk_table->entries[i].clk);
954e6bee 4485 }
4d8d44c6 4486 clocks->count = dep_mclk_table->count;
954e6bee
RZ
4487 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4488 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
4d8d44c6 4489 for (i = 0; i < mclk_table->count; i++)
954e6bee 4490 clocks->clock[i] = mclk_table->entries[i].clk;
4d8d44c6 4491 clocks->count = mclk_table->count;
599a7e9f
RZ
4492 }
4493 return 0;
4494}
4495
4496static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
4497 struct amd_pp_clocks *clocks)
4498{
4499 switch (type) {
4500 case amd_pp_sys_clock:
4501 smu7_get_sclks(hwmgr, clocks);
4502 break;
4503 case amd_pp_mem_clock:
4504 smu7_get_mclks(hwmgr, clocks);
4505 break;
4506 default:
4507 return -EINVAL;
4508 }
4509
4510 return 0;
4511}
4512
ff3953d4
EH
4513static void smu7_find_min_clock_masks(struct pp_hwmgr *hwmgr,
4514 uint32_t *sclk_mask, uint32_t *mclk_mask,
4515 uint32_t min_sclk, uint32_t min_mclk)
4516{
4517 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4518 struct smu7_dpm_table *dpm_table = &(data->dpm_table);
4519 uint32_t i;
4520
4521 for (i = 0; i < dpm_table->sclk_table.count; i++) {
4522 if (dpm_table->sclk_table.dpm_levels[i].enabled &&
4523 dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
4524 *sclk_mask |= 1 << i;
4525 }
4526
4527 for (i = 0; i < dpm_table->mclk_table.count; i++) {
4528 if (dpm_table->mclk_table.dpm_levels[i].enabled &&
4529 dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
4530 *mclk_mask |= 1 << i;
4531 }
4532}
4533
4534static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
4535 struct amd_pp_profile *request)
4536{
4537 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4538 int tmp_result, result = 0;
4539 uint32_t sclk_mask = 0, mclk_mask = 0;
4540
923d26db
EH
4541 if (hwmgr->chip_id == CHIP_FIJI) {
4542 if (request->type == AMD_PP_GFX_PROFILE)
4543 smu7_enable_power_containment(hwmgr);
4544 else if (request->type == AMD_PP_COMPUTE_PROFILE)
4545 smu7_disable_power_containment(hwmgr);
4546 }
4547
ff3953d4
EH
4548 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
4549 return -EINVAL;
4550
4551 tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
4552 PP_ASSERT_WITH_CODE(!tmp_result,
4553 "Failed to freeze SCLK MCLK DPM!",
4554 result = tmp_result);
4555
4556 tmp_result = smum_populate_requested_graphic_levels(hwmgr, request);
4557 PP_ASSERT_WITH_CODE(!tmp_result,
4558 "Failed to populate requested graphic levels!",
4559 result = tmp_result);
4560
4561 tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
4562 PP_ASSERT_WITH_CODE(!tmp_result,
4563 "Failed to unfreeze SCLK MCLK DPM!",
4564 result = tmp_result);
4565
4566 smu7_find_min_clock_masks(hwmgr, &sclk_mask, &mclk_mask,
4567 request->min_sclk, request->min_mclk);
4568
4569 if (sclk_mask) {
4570 if (!data->sclk_dpm_key_disabled)
d3f8c0ab 4571 smum_send_msg_to_smc_with_parameter(hwmgr,
ff3953d4
EH
4572 PPSMC_MSG_SCLKDPM_SetEnabledMask,
4573 data->dpm_level_enable_mask.
4574 sclk_dpm_enable_mask &
4575 sclk_mask);
4576 }
4577
4578 if (mclk_mask) {
4579 if (!data->mclk_dpm_key_disabled)
d3f8c0ab 4580 smum_send_msg_to_smc_with_parameter(hwmgr,
ff3953d4
EH
4581 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4582 data->dpm_level_enable_mask.
4583 mclk_dpm_enable_mask &
4584 mclk_mask);
4585 }
4586
4587 return result;
4588}
4589
f9c993ce
EH
4590static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
4591{
b3b03052 4592 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
74c31c6e
RZ
4593
4594 if (smu_data == NULL)
4595 return -EINVAL;
4596
4597 if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
4598 return 0;
4599
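	/* Only message the SMC when the AVS_ON feature status differs from the
	 * requested state, so redundant Enable/DisableAvfs requests are skipped.
	 */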
f9c993ce
EH
4600 if (enable) {
4601 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
4602 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
4603 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
d3f8c0ab 4604 hwmgr, PPSMC_MSG_EnableAvfs),
f9c993ce
EH
4605 "Failed to enable AVFS!",
4606 return -EINVAL);
4607 } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
4608 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
4609 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
d3f8c0ab 4610 hwmgr, PPSMC_MSG_DisableAvfs),
f9c993ce
EH
4611 "Failed to disable AVFS!",
4612 return -EINVAL);
4613
4614 return 0;
4615}
4616
26f52781
RZ
4617static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4618 uint32_t virtual_addr_low,
4619 uint32_t virtual_addr_hi,
4620 uint32_t mc_addr_low,
4621 uint32_t mc_addr_hi,
4622 uint32_t size)
4623{
4624 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4625
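	/* Publish the location and size of the driver-allocated CAC (DRAM log)
	 * buffer to the SMC through the soft-register block.
	 */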
4626 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4627 data->soft_regs_start +
4628 smum_get_offsetof(hwmgr,
4629 SMU_SoftRegisters, DRAM_LOG_ADDR_H),
4630 mc_addr_hi);
4631
4632 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4633 data->soft_regs_start +
4634 smum_get_offsetof(hwmgr,
4635 SMU_SoftRegisters, DRAM_LOG_ADDR_L),
4636 mc_addr_low);
4637
4638 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4639 data->soft_regs_start +
4640 smum_get_offsetof(hwmgr,
4641 SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
4642 virtual_addr_hi);
4643
4644 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4645 data->soft_regs_start +
4646 smum_get_offsetof(hwmgr,
4647 SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
4648 virtual_addr_low);
4649
4650 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4651 data->soft_regs_start +
4652 smum_get_offsetof(hwmgr,
4653 SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
4654 size);
4655 return 0;
4656}
4657
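Each write above lands at data->soft_regs_start plus the per-SMU offset of the named SMU_SoftRegisters field, which is how the SMC firmware locates the DRAM log buffer. The caller hands over the CPU-visible and MC addresses already split into 32-bit halves; a hypothetical caller sketch, assuming cpu_addr, mc_addr and log_size describe an already-allocated buffer:

	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
				lower_32_bits(cpu_addr), upper_32_bits(cpu_addr),
				lower_32_bits(mc_addr), upper_32_bits(mc_addr),
				log_size);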
ad8cec7d
RZ
4658static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
4659 struct amd_pp_simple_clock_info *clocks)
4660{
4661 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4662 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4663 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4664
4665 if (clocks == NULL)
4666 return -EINVAL;
4667
4668 clocks->memory_max_clock = mclk_table->count > 1 ?
4669 mclk_table->dpm_levels[mclk_table->count-1].value :
4670 mclk_table->dpm_levels[0].value;
4671 clocks->engine_max_clock = sclk_table->count > 1 ?
4672 sclk_table->dpm_levels[sclk_table->count-1].value :
4673 sclk_table->dpm_levels[0].value;
4674 return 0;
4675}
4676
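smu7_get_max_high_clocks simply reports the top populated SCLK/MCLK DPM level, falling back to level 0 when only a single level exists. A minimal read-back sketch, assuming the clock values carry whatever units the DPM tables use (10 kHz elsewhere in this driver):

	struct amd_pp_simple_clock_info info = {0};

	if (hwmgr->hwmgr_func->get_max_high_clocks &&
	    !hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, &info))
		pr_debug("max sclk %u, max mclk %u\n",
			 info.engine_max_clock, info.memory_max_clock);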
a1c1a1de 4677static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
599a7e9f 4678 .backend_init = &smu7_hwmgr_backend_init,
a0aa7046 4679 .backend_fini = &smu7_hwmgr_backend_fini,
599a7e9f
RZ
4680 .asic_setup = &smu7_setup_asic_task,
4681 .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
4682 .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
4683 .force_dpm_level = &smu7_force_dpm_level,
4684 .power_state_set = smu7_set_power_state_tasks,
4685 .get_power_state_size = smu7_get_power_state_size,
4686 .get_mclk = smu7_dpm_get_mclk,
4687 .get_sclk = smu7_dpm_get_sclk,
4688 .patch_boot_state = smu7_dpm_patch_boot_state,
4689 .get_pp_table_entry = smu7_get_pp_table_entry,
4690 .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
599a7e9f
RZ
4691 .powerdown_uvd = smu7_powerdown_uvd,
4692 .powergate_uvd = smu7_powergate_uvd,
4693 .powergate_vce = smu7_powergate_vce,
4694 .disable_clock_power_gating = smu7_disable_clock_power_gating,
4695 .update_clock_gatings = smu7_update_clock_gatings,
4696 .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
4697 .display_config_changed = smu7_display_configuration_changed_task,
4698 .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
4699 .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
4700 .get_temperature = smu7_thermal_get_temperature,
4701 .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
4702 .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
4703 .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
4704 .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
4705 .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
4706 .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
4707 .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
4708 .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
4709 .register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt,
4710 .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
4711 .check_states_equal = smu7_check_states_equal,
4712 .set_fan_control_mode = smu7_set_fan_control_mode,
4713 .get_fan_control_mode = smu7_get_fan_control_mode,
4714 .force_clock_level = smu7_force_clock_level,
4715 .print_clock_levels = smu7_print_clock_levels,
4716 .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
4717 .get_sclk_od = smu7_get_sclk_od,
4718 .set_sclk_od = smu7_set_sclk_od,
4719 .get_mclk_od = smu7_get_mclk_od,
4720 .set_mclk_od = smu7_set_mclk_od,
4721 .get_clock_by_type = smu7_get_clock_by_type,
a6e36952 4722 .read_sensor = smu7_read_sensor,
f28a9b65 4723 .dynamic_state_management_disable = smu7_disable_dpm_tasks,
ff3953d4 4724 .set_power_profile_state = smu7_set_power_profile_state,
f9c993ce 4725 .avfs_control = smu7_avfs_control,
1dfc41d4 4726 .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
1ab47204 4727 .start_thermal_controller = smu7_start_thermal_controller,
26f52781 4728 .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
ad8cec7d 4729 .get_max_high_clocks = smu7_get_max_high_clocks,
599a7e9f
RZ
4730};
4731
4732uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
4733 uint32_t clock_insr)
4734{
4735 uint8_t i;
4736 uint32_t temp;
4737 uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
4738
4739 PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
4740 for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
4741 temp = clock >> i;
4742
4743 if (temp >= min || i == 0)
4744 break;
4745 }
4746 return i;
4747}
4748
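smu7_get_sleep_divider_id_from_clock walks the divider IDs downward and returns the largest ID whose divided clock (clock >> i) still meets the deep-sleep floor. A worked example, assuming SMU7_MAX_DEEPSLEEP_DIVIDER_ID is 5 and SMU7_MINIMUM_ENGINE_CLOCK is 2500 (values taken from smu7_hwmgr.h as an assumption, in 10 kHz units):

	/* clock = 60000, clock_insr = 2500  ->  min = 2500
	 * i = 5: 60000 >> 5 = 1875  < 2500, keep searching
	 * i = 4: 60000 >> 4 = 3750 >= 2500, stop
	 */
	uint8_t id = smu7_get_sleep_divider_id_from_clock(60000, 2500);	/* id == 4, i.e. clock / 16 */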
a5b580e1 4749int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
599a7e9f
RZ
4750{
4751 int ret = 0;
4752
4753 hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
4754 if (hwmgr->pp_table_version == PP_TABLE_V0)
4755 hwmgr->pptable_func = &pptable_funcs;
4756 else if (hwmgr->pp_table_version == PP_TABLE_V1)
4757 hwmgr->pptable_func = &pptable_v1_0_funcs;
4758
599a7e9f
RZ
4759 return ret;
4760}
4761
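smu7_init_function_pointers is the one non-static entry point here: it installs smu7_hwmgr_funcs and selects the pptable parser from the table version discovered earlier. A minimal setup sketch, with pp_table_version assumed to have been set by the common hwmgr init code before this call:

	hwmgr->pp_table_version = PP_TABLE_V1;	/* or PP_TABLE_V0 on legacy boards */
	ret = smu7_init_function_pointers(hwmgr);
	if (ret)
		return ret;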