drm/amdgpu/pm: Change the member function name in pp_hwmgr_func and pptable_funcs
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
7bd55429 23#include "pp_debug.h"
1f7371b2
AD
24#include <linux/types.h>
25#include <linux/kernel.h>
26#include <linux/gfp.h>
ac885b3a 27#include <linux/slab.h>
64f6db77 28#include <linux/firmware.h>
b75efe88 29#include <linux/reboot.h>
1f7371b2
AD
30#include "amd_shared.h"
31#include "amd_powerplay.h"
577bbe01 32#include "power_state.h"
a2c120ce 33#include "amdgpu.h"
65ad7cac 34#include "hwmgr.h"
6ddbd37f
EQ
35#include "amdgpu_dpm_internal.h"
36#include "amdgpu_display.h"
6d07fe7b 37
b905090d 38static const struct amd_pm_funcs pp_dpm_funcs;
3bace359 39
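/*
 * Create the powerplay context: allocate the pp_hwmgr instance, attach it to
 * the amdgpu device and hook up the amd_pm_funcs dispatch table.
 */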
a2c120ce 40static int amd_powerplay_create(struct amdgpu_device *adev)
139a285f 41{
b905090d 42 struct pp_hwmgr *hwmgr;
139a285f 43
a2c120ce 44 if (adev == NULL)
139a285f
RZ
45 return -EINVAL;
46
b905090d
RZ
47 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
48 if (hwmgr == NULL)
139a285f
RZ
49 return -ENOMEM;
50
b905090d 51 hwmgr->adev = adev;
8bb575a2 52 hwmgr->not_vf = !amdgpu_sriov_vf(adev);
b905090d 53 hwmgr->device = amdgpu_cgs_create_device(adev);
6b6706cd 54 mutex_init(&hwmgr->msg_lock);
b905090d
RZ
55 hwmgr->chip_family = adev->family;
56 hwmgr->chip_id = adev->asic_type;
3b94fb10 57 hwmgr->feature_mask = adev->pm.pp_feature;
555fd70c 58 hwmgr->display_config = &adev->pm.pm_display_cfg;
b905090d
RZ
59 adev->powerplay.pp_handle = hwmgr;
60 adev->powerplay.pp_funcs = &pp_dpm_funcs;
139a285f
RZ
61 return 0;
62}
63
a2c120ce 64
ba8ab90e 65static void amd_powerplay_destroy(struct amdgpu_device *adev)
139a285f 66{
b905090d 67 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
139a285f 68
6b6706cd
EQ
69 mutex_destroy(&hwmgr->msg_lock);
70
b905090d
RZ
71 kfree(hwmgr->hardcode_pp_table);
72 hwmgr->hardcode_pp_table = NULL;
7b38a49d 73
b905090d
RZ
74 kfree(hwmgr);
75 hwmgr = NULL;
139a285f
RZ
76}
77
1c863802
RZ
78static int pp_early_init(void *handle)
79{
80 int ret;
b905090d 81 struct amdgpu_device *adev = handle;
139a285f 82
a2c120ce 83 ret = amd_powerplay_create(adev);
139a285f 84
a2c120ce
RZ
85 if (ret != 0)
86 return ret;
87
b905090d 88 ret = hwmgr_early_init(adev->powerplay.pp_handle);
9441f964 89 if (ret)
b3b03052 90 return -EINVAL;
1c863802 91
b4eeed59 92 return 0;
1f7371b2
AD
93}
94
b75efe88
EQ
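/*
 * Delayed SW CTF handler: after the enforced delay, re-read the GPU
 * temperature and, if it is still at or above the SW CTF threshold (or
 * cannot be read), trigger an orderly shutdown to protect the hardware.
 */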
95static void pp_swctf_delayed_work_handler(struct work_struct *work)
96{
97 struct pp_hwmgr *hwmgr =
98 container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
99 struct amdgpu_device *adev = hwmgr->adev;
100 struct amdgpu_dpm_thermal *range =
101 &adev->pm.dpm.thermal;
102 uint32_t gpu_temperature, size;
103 int ret;
104
105 /*
 106 * If the hotspot/edge temperature is confirmed to be below the SW CTF trip point
 107 * after the enforced delay, nothing will be done.
108 * Otherwise, a graceful shutdown will be performed to prevent further damage.
109 */
110 if (range->sw_ctf_threshold &&
111 hwmgr->hwmgr_func->read_sensor) {
112 ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
113 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
114 &gpu_temperature,
115 &size);
116 /*
 117 * For some legacy ASICs, retrieving the hotspot temperature might not be
 118 * supported. Check the edge temperature instead in that case.
119 */
120 if (ret == -EOPNOTSUPP)
121 ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
122 AMDGPU_PP_SENSOR_EDGE_TEMP,
123 &gpu_temperature,
124 &size);
125 if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
126 return;
127 }
128
129 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
130 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
131 orderly_poweroff(true);
132}
133
1c863802 134static int pp_sw_init(void *handle)
1f7371b2 135{
b905090d
RZ
136 struct amdgpu_device *adev = handle;
137 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
3bace359
JZ
138 int ret = 0;
139
ba8ab90e 140 ret = hwmgr_sw_init(hwmgr);
7383bcb9 141
ba8ab90e 142 pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
b905090d 143
b75efe88
EQ
144 if (!ret)
145 INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
146 pp_swctf_delayed_work_handler);
147
1c863802
RZ
148 return ret;
149}
3bace359 150
1c863802
RZ
151static int pp_sw_fini(void *handle)
152{
b905090d
RZ
153 struct amdgpu_device *adev = handle;
154 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
1c863802 155
ba8ab90e 156 hwmgr_sw_fini(hwmgr);
2dac5936 157
778af666 158 amdgpu_ucode_release(&adev->pm.fw);
2dac5936 159
b905090d 160 return 0;
1f7371b2
AD
161}
162
163static int pp_hw_init(void *handle)
164{
ac885b3a 165 int ret = 0;
b905090d
RZ
166 struct amdgpu_device *adev = handle;
167 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
ac885b3a 168
ba8ab90e 169 ret = hwmgr_hw_init(hwmgr);
ac885b3a 170
ba8ab90e
RZ
171 if (ret)
172 pr_err("powerplay hw init failed\n");
ac885b3a 173
e5f23736 174 return ret;
1f7371b2
AD
175}
176
177static int pp_hw_fini(void *handle)
178{
b905090d
RZ
179 struct amdgpu_device *adev = handle;
180 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
ac885b3a 181
b75efe88
EQ
182 cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
183
ba8ab90e 184 hwmgr_hw_fini(hwmgr);
df1e6394 185
1f7371b2
AD
186 return 0;
187}
188
7951e376
RZ
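/*
 * Reserve a kernel buffer in GTT for the SMU's private use and hand its CPU
 * and GPU addresses to the SMU through the notify_cac_buffer_info callback.
 */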
189static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
190{
191 int r = -EINVAL;
192 void *cpu_ptr = NULL;
193 uint64_t gpu_addr;
194 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
195
196 if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
197 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
198 &adev->pm.smu_prv_buffer,
199 &gpu_addr,
200 &cpu_ptr)) {
201 DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
202 return;
203 }
204
205 if (hwmgr->hwmgr_func->notify_cac_buffer_info)
206 r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
207 lower_32_bits((unsigned long)cpu_ptr),
208 upper_32_bits((unsigned long)cpu_ptr),
209 lower_32_bits(gpu_addr),
210 upper_32_bits(gpu_addr),
211 adev->pm.smu_prv_buffer_size);
212
213 if (r) {
214 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
215 adev->pm.smu_prv_buffer = NULL;
216 DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
217 }
218}
219
6d07fe7b
RZ
220static int pp_late_init(void *handle)
221{
b905090d
RZ
222 struct amdgpu_device *adev = handle;
223 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
b905090d 224
a746c77e 225 if (hwmgr && hwmgr->pm_en)
b61e54cb 226 hwmgr_handle_task(hwmgr,
39199b80 227 AMD_PP_TASK_COMPLETE_INIT, NULL);
7951e376
RZ
228 if (adev->pm.smu_prv_buffer_size != 0)
229 pp_reserve_vram_for_smu(adev);
9667849b 230
6d07fe7b
RZ
231 return 0;
232}
233
139a285f
RZ
234static void pp_late_fini(void *handle)
235{
2dac5936
RZ
236 struct amdgpu_device *adev = handle;
237
7951e376
RZ
238 if (adev->pm.smu_prv_buffer)
239 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
2dac5936 240 amd_powerplay_destroy(adev);
139a285f
RZ
241}
242
243
1f7371b2
AD
244static bool pp_is_idle(void *handle)
245{
ed5121a3 246 return false;
1f7371b2
AD
247}
248
249static int pp_wait_for_idle(void *handle)
250{
251 return 0;
252}
253
254static int pp_sw_reset(void *handle)
255{
256 return 0;
257}
258
1f7371b2
AD
259static int pp_set_powergating_state(void *handle,
260 enum amd_powergating_state state)
261{
85f80cb3 262 return 0;
1f7371b2
AD
263}
264
265static int pp_suspend(void *handle)
266{
b905090d
RZ
267 struct amdgpu_device *adev = handle;
268 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
577bbe01 269
b75efe88
EQ
270 cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
271
ba8ab90e 272 return hwmgr_suspend(hwmgr);
1f7371b2
AD
273}
274
275static int pp_resume(void *handle)
276{
b905090d
RZ
277 struct amdgpu_device *adev = handle;
278 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
1c863802 279
ba8ab90e 280 return hwmgr_resume(hwmgr);
1f7371b2
AD
281}
282
f004938f
AG
283static int pp_set_clockgating_state(void *handle,
284 enum amd_clockgating_state state)
285{
286 return 0;
287}
288
b905090d 289static const struct amd_ip_funcs pp_ip_funcs = {
88a907d6 290 .name = "powerplay",
1f7371b2 291 .early_init = pp_early_init,
6d07fe7b 292 .late_init = pp_late_init,
1f7371b2
AD
293 .sw_init = pp_sw_init,
294 .sw_fini = pp_sw_fini,
295 .hw_init = pp_hw_init,
296 .hw_fini = pp_hw_fini,
139a285f 297 .late_fini = pp_late_fini,
1f7371b2
AD
298 .suspend = pp_suspend,
299 .resume = pp_resume,
300 .is_idle = pp_is_idle,
301 .wait_for_idle = pp_wait_for_idle,
302 .soft_reset = pp_sw_reset,
f004938f 303 .set_clockgating_state = pp_set_clockgating_state,
1f7371b2
AD
304 .set_powergating_state = pp_set_powergating_state,
305};
306
b905090d
RZ
307const struct amdgpu_ip_block_version pp_smu_ip_block =
308{
309 .type = AMD_IP_BLOCK_TYPE_SMC,
310 .major = 1,
311 .minor = 0,
312 .rev = 0,
313 .funcs = &pp_ip_funcs,
314};
315
 316/* This interface is only supported on VI,
 317 * because only smu7/8 can help to load the gfx/sdma fw.
 318 * The SMU needs to be enabled before the other IPs' fw can be loaded,
 319 * so start the SMU here to load the smu7 fw and then the other IPs' fw.
 320 */
1f7371b2
AD
321static int pp_dpm_load_fw(void *handle)
322{
9c8bc8d3
RZ
323 struct pp_hwmgr *hwmgr = handle;
324
325 if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
326 return -EINVAL;
327
328 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
329 pr_err("fw load failed\n");
330 return -EINVAL;
331 }
332
1f7371b2
AD
333 return 0;
334}
335
336static int pp_dpm_fw_loading_complete(void *handle)
337{
338 return 0;
339}
340
3811f8f0
RZ
341static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
342{
b905090d 343 struct pp_hwmgr *hwmgr = handle;
3811f8f0 344
ba8ab90e
RZ
345 if (!hwmgr || !hwmgr->pm_en)
346 return -EINVAL;
3811f8f0 347
3811f8f0 348 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
527aa2a0 349 pr_info_ratelimited("%s was not implemented.\n", __func__);
3811f8f0
RZ
350 return 0;
351 }
352
353 return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
354}
355
9947f704
RZ
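/*
 * Track entry to and exit from the UMD pstate profile levels: save the
 * current DPM level when entering and restore it on PROFILE_EXIT.
 */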
356static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
357 enum amd_dpm_forced_level *level)
358{
359 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
360 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
361 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
362 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
363
364 if (!(hwmgr->dpm_level & profile_mode_mask)) {
 365 /* enter umd pstate, save current level, disable gfx cg */
366 if (*level & profile_mode_mask) {
367 hwmgr->saved_dpm_level = hwmgr->dpm_level;
368 hwmgr->en_umd_pstate = true;
9947f704
RZ
369 }
370 } else {
 371 /* exit umd pstate, restore level, enable gfx cg */
372 if (!(*level & profile_mode_mask)) {
373 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
374 *level = hwmgr->saved_dpm_level;
375 hwmgr->en_umd_pstate = false;
9947f704
RZ
376 }
377 }
378}
379
1f7371b2
AD
380static int pp_dpm_force_performance_level(void *handle,
381 enum amd_dpm_forced_level level)
382{
b905090d 383 struct pp_hwmgr *hwmgr = handle;
577bbe01 384
ba8ab90e
RZ
385 if (!hwmgr || !hwmgr->pm_en)
386 return -EINVAL;
577bbe01 387
9947f704
RZ
388 if (level == hwmgr->dpm_level)
389 return 0;
390
9947f704
RZ
391 pp_dpm_en_umd_pstate(hwmgr, &level);
392 hwmgr->request_dpm_level = level;
b905090d 393 hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
8621bbbb 394
1f7371b2
AD
395 return 0;
396}
577bbe01 397
1f7371b2
AD
398static enum amd_dpm_forced_level pp_dpm_get_performance_level(
399 void *handle)
400{
b905090d 401 struct pp_hwmgr *hwmgr = handle;
577bbe01 402
ba8ab90e
RZ
403 if (!hwmgr || !hwmgr->pm_en)
404 return -EINVAL;
577bbe01 405
a746c77e 406 return hwmgr->dpm_level;
1f7371b2 407}
577bbe01 408
f93f0c3a 409static uint32_t pp_dpm_get_sclk(void *handle, bool low)
1f7371b2 410{
b905090d 411 struct pp_hwmgr *hwmgr = handle;
577bbe01 412
ba8ab90e
RZ
413 if (!hwmgr || !hwmgr->pm_en)
414 return 0;
577bbe01 415
7383bcb9 416 if (hwmgr->hwmgr_func->get_sclk == NULL) {
527aa2a0 417 pr_info_ratelimited("%s was not implemented.\n", __func__);
7383bcb9
RZ
418 return 0;
419 }
a746c77e 420 return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
1f7371b2 421}
577bbe01 422
f93f0c3a 423static uint32_t pp_dpm_get_mclk(void *handle, bool low)
1f7371b2 424{
b905090d 425 struct pp_hwmgr *hwmgr = handle;
577bbe01 426
ba8ab90e
RZ
427 if (!hwmgr || !hwmgr->pm_en)
428 return 0;
577bbe01 429
7383bcb9 430 if (hwmgr->hwmgr_func->get_mclk == NULL) {
527aa2a0 431 pr_info_ratelimited("%s was not implemented.\n", __func__);
7383bcb9
RZ
432 return 0;
433 }
a746c77e 434 return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
1f7371b2 435}
577bbe01 436
f93f0c3a 437static void pp_dpm_powergate_vce(void *handle, bool gate)
1f7371b2 438{
b905090d 439 struct pp_hwmgr *hwmgr = handle;
577bbe01 440
ba8ab90e 441 if (!hwmgr || !hwmgr->pm_en)
f93f0c3a 442 return;
577bbe01 443
7383bcb9 444 if (hwmgr->hwmgr_func->powergate_vce == NULL) {
527aa2a0 445 pr_info_ratelimited("%s was not implemented.\n", __func__);
f93f0c3a 446 return;
7383bcb9 447 }
f93f0c3a 448 hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
1f7371b2 449}
577bbe01 450
f93f0c3a 451static void pp_dpm_powergate_uvd(void *handle, bool gate)
1f7371b2 452{
b905090d 453 struct pp_hwmgr *hwmgr = handle;
577bbe01 454
ba8ab90e 455 if (!hwmgr || !hwmgr->pm_en)
f93f0c3a 456 return;
577bbe01 457
7383bcb9 458 if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
527aa2a0 459 pr_info_ratelimited("%s was not implemented.\n", __func__);
f93f0c3a 460 return;
7383bcb9 461 }
f93f0c3a 462 hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
577bbe01
RZ
463}
464
df1e6394 465static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
39199b80 466 enum amd_pm_state_type *user_state)
1f7371b2 467{
b905090d 468 struct pp_hwmgr *hwmgr = handle;
577bbe01 469
ba8ab90e
RZ
470 if (!hwmgr || !hwmgr->pm_en)
471 return -EINVAL;
577bbe01 472
a746c77e 473 return hwmgr_handle_task(hwmgr, task_id, user_state);
1f7371b2 474}
577bbe01 475
f8a4c11b 476static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
1f7371b2 477{
b905090d 478 struct pp_hwmgr *hwmgr = handle;
577bbe01 479 struct pp_power_state *state;
2a507105 480 enum amd_pm_state_type pm_type;
577bbe01 481
ba8ab90e 482 if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
577bbe01
RZ
483 return -EINVAL;
484
485 state = hwmgr->current_ps;
486
487 switch (state->classification.ui_label) {
488 case PP_StateUILabel_Battery:
2a507105 489 pm_type = POWER_STATE_TYPE_BATTERY;
0f987cd0 490 break;
577bbe01 491 case PP_StateUILabel_Balanced:
2a507105 492 pm_type = POWER_STATE_TYPE_BALANCED;
0f987cd0 493 break;
577bbe01 494 case PP_StateUILabel_Performance:
2a507105 495 pm_type = POWER_STATE_TYPE_PERFORMANCE;
0f987cd0 496 break;
577bbe01 497 default:
f3898ea1 498 if (state->classification.flags & PP_StateClassificationFlag_Boot)
2a507105 499 pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
f3898ea1 500 else
2a507105 501 pm_type = POWER_STATE_TYPE_DEFAULT;
0f987cd0 502 break;
577bbe01 503 }
2a507105
RZ
504
505 return pm_type;
1f7371b2 506}
577bbe01 507
685fae24 508static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
cac9a199 509{
b905090d 510 struct pp_hwmgr *hwmgr = handle;
cac9a199 511
ba8ab90e 512 if (!hwmgr || !hwmgr->pm_en)
685fae24
EQ
513 return -EOPNOTSUPP;
514
515 if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
516 return -EOPNOTSUPP;
517
518 if (mode == U32_MAX)
519 return -EINVAL;
cac9a199 520
f93f0c3a 521 hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
685fae24
EQ
522
523 return 0;
cac9a199
RZ
524}
525
685fae24 526static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
cac9a199 527{
b905090d 528 struct pp_hwmgr *hwmgr = handle;
cac9a199 529
ba8ab90e 530 if (!hwmgr || !hwmgr->pm_en)
685fae24
EQ
531 return -EOPNOTSUPP;
532
533 if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
534 return -EOPNOTSUPP;
535
536 if (!fan_mode)
537 return -EINVAL;
cac9a199 538
685fae24 539 *fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
685fae24 540 return 0;
cac9a199
RZ
541}
542
0d8318e1 543static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
cac9a199 544{
b905090d 545 struct pp_hwmgr *hwmgr = handle;
cac9a199 546
ba8ab90e 547 if (!hwmgr || !hwmgr->pm_en)
685fae24
EQ
548 return -EOPNOTSUPP;
549
550 if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
551 return -EOPNOTSUPP;
552
553 if (speed == U32_MAX)
ba8ab90e 554 return -EINVAL;
cac9a199 555
a746c77e 556 return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
cac9a199
RZ
557}
558
0d8318e1 559static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
cac9a199 560{
b905090d 561 struct pp_hwmgr *hwmgr = handle;
cac9a199 562
ba8ab90e 563 if (!hwmgr || !hwmgr->pm_en)
685fae24 564 return -EOPNOTSUPP;
cac9a199 565
685fae24
EQ
566 if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
567 return -EOPNOTSUPP;
568
569 if (!speed)
570 return -EINVAL;
cac9a199 571
a746c77e 572 return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
cac9a199
RZ
573}
574
72a16a9d
GI
575static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
576{
b905090d 577 struct pp_hwmgr *hwmgr = handle;
72a16a9d 578
ba8ab90e 579 if (!hwmgr || !hwmgr->pm_en)
685fae24 580 return -EOPNOTSUPP;
72a16a9d 581
72a16a9d 582 if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
685fae24
EQ
583 return -EOPNOTSUPP;
584
585 if (!rpm)
72a16a9d
GI
586 return -EINVAL;
587
a746c77e 588 return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
72a16a9d
GI
589}
590
c2870527
RZ
591static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
592{
593 struct pp_hwmgr *hwmgr = handle;
c2870527
RZ
594
595 if (!hwmgr || !hwmgr->pm_en)
685fae24
EQ
596 return -EOPNOTSUPP;
597
598 if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
599 return -EOPNOTSUPP;
600
601 if (rpm == U32_MAX)
c2870527
RZ
602 return -EINVAL;
603
a746c77e 604 return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
c2870527
RZ
605}
606
f3898ea1
EH
607static int pp_dpm_get_pp_num_states(void *handle,
608 struct pp_states_info *data)
609{
b905090d 610 struct pp_hwmgr *hwmgr = handle;
f3898ea1
EH
611 int i;
612
4dbda35f
EQ
613 memset(data, 0, sizeof(*data));
614
37d67a7a 615 if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
f3898ea1
EH
616 return -EINVAL;
617
618 data->nums = hwmgr->num_ps;
619
620 for (i = 0; i < hwmgr->num_ps; i++) {
621 struct pp_power_state *state = (struct pp_power_state *)
622 ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
623 switch (state->classification.ui_label) {
624 case PP_StateUILabel_Battery:
625 data->states[i] = POWER_STATE_TYPE_BATTERY;
626 break;
627 case PP_StateUILabel_Balanced:
628 data->states[i] = POWER_STATE_TYPE_BALANCED;
629 break;
630 case PP_StateUILabel_Performance:
631 data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
632 break;
633 default:
634 if (state->classification.flags & PP_StateClassificationFlag_Boot)
635 data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
636 else
637 data->states[i] = POWER_STATE_TYPE_DEFAULT;
638 }
639 }
f3898ea1
EH
640 return 0;
641}
642
643static int pp_dpm_get_pp_table(void *handle, char **table)
644{
b905090d 645 struct pp_hwmgr *hwmgr = handle;
f3898ea1 646
37d67a7a 647 if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
4dcf9e6f
EH
648 return -EINVAL;
649
650 *table = (char *)hwmgr->soft_pp_table;
a746c77e 651 return hwmgr->soft_pp_table_size;
f3898ea1
EH
652}
653
f685d714
RZ
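/*
 * Tear down and re-initialize the hwmgr hardware state, then replay the
 * COMPLETE_INIT task; used after the soft pp table has been overridden.
 */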
654static int amd_powerplay_reset(void *handle)
655{
b905090d 656 struct pp_hwmgr *hwmgr = handle;
f685d714
RZ
657 int ret;
658
46b27ee9 659 ret = hwmgr_hw_fini(hwmgr);
f685d714
RZ
660 if (ret)
661 return ret;
662
b905090d 663 ret = hwmgr_hw_init(hwmgr);
f685d714
RZ
664 if (ret)
665 return ret;
666
b905090d 667 return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
f685d714
RZ
668}
669
f3898ea1
EH
670static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
671{
b905090d 672 struct pp_hwmgr *hwmgr = handle;
b61e54cb 673 int ret = -ENOMEM;
f3898ea1 674
ba8ab90e
RZ
675 if (!hwmgr || !hwmgr->pm_en)
676 return -EINVAL;
f3898ea1 677
4dcf9e6f 678 if (!hwmgr->hardcode_pp_table) {
efdf7a93
EC
679 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
680 hwmgr->soft_pp_table_size,
681 GFP_KERNEL);
b61e54cb 682 if (!hwmgr->hardcode_pp_table)
a746c77e 683 return ret;
7383bcb9 684 }
f3898ea1 685
4dcf9e6f
EH
686 memcpy(hwmgr->hardcode_pp_table, buf, size);
687
688 hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
689
dd4bdf3b
EH
690 ret = amd_powerplay_reset(handle);
691 if (ret)
a746c77e 692 return ret;
dd4bdf3b 693
a746c77e 694 if (hwmgr->hwmgr_func->avfs_control)
dd4bdf3b 695 ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
a746c77e 696
b61e54cb 697 return ret;
f3898ea1
EH
698}
699
700static int pp_dpm_force_clock_level(void *handle,
5632708f 701 enum pp_clock_type type, uint32_t mask)
f3898ea1 702{
b905090d 703 struct pp_hwmgr *hwmgr = handle;
f3898ea1 704
ba8ab90e
RZ
705 if (!hwmgr || !hwmgr->pm_en)
706 return -EINVAL;
f3898ea1 707
7383bcb9 708 if (hwmgr->hwmgr_func->force_clock_level == NULL) {
527aa2a0 709 pr_info_ratelimited("%s was not implemented.\n", __func__);
7383bcb9
RZ
710 return 0;
711 }
241dbbb1
EQ
712
713 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
9ed9203c 714 pr_debug("force clock level is for dpm manual mode only.\n");
241dbbb1
EQ
715 return -EINVAL;
716 }
717
a746c77e 718 return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
f3898ea1
EH
719}
720
5d8539d2
DP
721static int pp_dpm_emit_clock_levels(void *handle,
722 enum pp_clock_type type,
723 char *buf,
724 int *offset)
725{
726 struct pp_hwmgr *hwmgr = handle;
727
728 if (!hwmgr || !hwmgr->pm_en)
729 return -EOPNOTSUPP;
730
731 if (!hwmgr->hwmgr_func->emit_clock_levels)
732 return -ENOENT;
733
734 return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
735}
736
f3898ea1
EH
737static int pp_dpm_print_clock_levels(void *handle,
738 enum pp_clock_type type, char *buf)
739{
b905090d 740 struct pp_hwmgr *hwmgr = handle;
f3898ea1 741
ba8ab90e
RZ
742 if (!hwmgr || !hwmgr->pm_en)
743 return -EINVAL;
f3898ea1 744
7383bcb9 745 if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
527aa2a0 746 pr_info_ratelimited("%s was not implemented.\n", __func__);
7383bcb9
RZ
747 return 0;
748 }
a746c77e 749 return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
f3898ea1
EH
750}
751
428bafa8
EH
752static int pp_dpm_get_sclk_od(void *handle)
753{
b905090d 754 struct pp_hwmgr *hwmgr = handle;
428bafa8 755
ba8ab90e
RZ
756 if (!hwmgr || !hwmgr->pm_en)
757 return -EINVAL;
428bafa8 758
428bafa8 759 if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
527aa2a0 760 pr_info_ratelimited("%s was not implemented.\n", __func__);
428bafa8
EH
761 return 0;
762 }
a746c77e 763 return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
428bafa8
EH
764}
765
766static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
767{
b905090d 768 struct pp_hwmgr *hwmgr = handle;
428bafa8 769
ba8ab90e
RZ
770 if (!hwmgr || !hwmgr->pm_en)
771 return -EINVAL;
428bafa8 772
428bafa8 773 if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
527aa2a0 774 pr_info_ratelimited("%s was not implemented.\n", __func__);
428bafa8
EH
775 return 0;
776 }
777
a746c77e 778 return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
428bafa8
EH
779}
780
f2bdc05f
EH
781static int pp_dpm_get_mclk_od(void *handle)
782{
b905090d 783 struct pp_hwmgr *hwmgr = handle;
f2bdc05f 784
ba8ab90e
RZ
785 if (!hwmgr || !hwmgr->pm_en)
786 return -EINVAL;
f2bdc05f 787
f2bdc05f 788 if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
527aa2a0 789 pr_info_ratelimited("%s was not implemented.\n", __func__);
f2bdc05f
EH
790 return 0;
791 }
a746c77e 792 return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
f2bdc05f
EH
793}
794
795static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
796{
b905090d 797 struct pp_hwmgr *hwmgr = handle;
f2bdc05f 798
ba8ab90e
RZ
799 if (!hwmgr || !hwmgr->pm_en)
800 return -EINVAL;
f2bdc05f 801
f2bdc05f 802 if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
527aa2a0 803 pr_info_ratelimited("%s was not implemented.\n", __func__);
f2bdc05f
EH
804 return 0;
805 }
a746c77e 806 return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
f2bdc05f
EH
807}
808
9f8df7d7
TSD
809static int pp_dpm_read_sensor(void *handle, int idx,
810 void *value, int *size)
a6e36952 811{
b905090d 812 struct pp_hwmgr *hwmgr = handle;
a6e36952 813
ba8ab90e 814 if (!hwmgr || !hwmgr->pm_en || !value)
5ed8d656
RZ
815 return -EINVAL;
816
5ed8d656
RZ
817 switch (idx) {
818 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
b1a9557a 819 *((uint32_t *)value) = hwmgr->pstate_sclk * 100;
5ed8d656
RZ
820 return 0;
821 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
b1a9557a
EQ
822 *((uint32_t *)value) = hwmgr->pstate_mclk * 100;
823 return 0;
824 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
825 *((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
826 return 0;
827 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
828 *((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
a6e36952 829 return 0;
d5f48037
RZ
830 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
831 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
832 return 0;
833 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
834 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
835 return 0;
5ed8d656 836 default:
a746c77e 837 return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
a6e36952 838 }
a6e36952
TSD
839}
840
597be302
AD
841static struct amd_vce_state*
842pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
843{
b905090d 844 struct pp_hwmgr *hwmgr = handle;
597be302 845
ba8ab90e 846 if (!hwmgr || !hwmgr->pm_en)
1c863802
RZ
847 return NULL;
848
ba8ab90e 849 if (idx < hwmgr->num_vce_state_tables)
1c863802 850 return &hwmgr->vce_states[idx];
597be302
AD
851 return NULL;
852}
853
6390258a
RZ
854static int pp_get_power_profile_mode(void *handle, char *buf)
855{
b905090d 856 struct pp_hwmgr *hwmgr = handle;
6390258a 857
a7505591 858 if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
a035be8a
ML
859 return -EOPNOTSUPP;
860 if (!buf)
6390258a
RZ
861 return -EINVAL;
862
a746c77e 863 return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
6390258a
RZ
864}
865
866static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
867{
b905090d 868 struct pp_hwmgr *hwmgr = handle;
6390258a 869
a7505591 870 if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
a746c77e 871 return -EOPNOTSUPP;
6390258a 872
7a862028 873 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
9ed9203c 874 pr_debug("power profile setting is for manual dpm mode only.\n");
a035be8a 875 return -EINVAL;
7a862028
EQ
876 }
877
a746c77e 878 return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
6390258a
RZ
879}
880
12a6727d
XD
881static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
882{
883 struct pp_hwmgr *hwmgr = handle;
884
885 if (!hwmgr || !hwmgr->pm_en)
886 return -EINVAL;
887
888 if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
889 return 0;
890
891 return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
892}
893
e4d0ef75
NC
894static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
895 long *input, uint32_t size)
e3933f26 896{
b905090d 897 struct pp_hwmgr *hwmgr = handle;
e3933f26 898
ba8ab90e 899 if (!hwmgr || !hwmgr->pm_en)
e3933f26
RZ
900 return -EINVAL;
901
e3933f26 902 if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
527aa2a0 903 pr_info_ratelimited("%s was not implemented.\n", __func__);
12a6727d 904 return 0;
e3933f26
RZ
905 }
906
907 return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
908}
909
a2c28e34
AD
910static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
911{
912 struct pp_hwmgr *hwmgr = handle;
913
29a45960 914 if (!hwmgr)
a2c28e34
AD
915 return -EINVAL;
916
29a45960
EQ
917 if (!hwmgr->pm_en)
918 return 0;
919
479baeac
AD
920 if (hwmgr->hwmgr_func->set_mp1_state)
921 return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
a2c28e34 922
479baeac 923 return 0;
a2c28e34
AD
924}
925
34bb2734 926static int pp_dpm_switch_power_profile(void *handle,
052fe96d 927 enum PP_SMC_POWER_PROFILE type, bool en)
34bb2734 928{
b905090d 929 struct pp_hwmgr *hwmgr = handle;
052fe96d
RZ
930 long workload;
931 uint32_t index;
34bb2734 932
ba8ab90e 933 if (!hwmgr || !hwmgr->pm_en)
34bb2734
EH
934 return -EINVAL;
935
052fe96d 936 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
527aa2a0 937 pr_info_ratelimited("%s was not implemented.\n", __func__);
052fe96d
RZ
938 return -EINVAL;
939 }
940
941 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
942 return -EINVAL;
943
052fe96d
RZ
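 /* Update the aggregated workload mask and pick the highest-priority
 * workload that is still requested (fls() finds the highest set bit).
 */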
944 if (!en) {
945 hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
946 index = fls(hwmgr->workload_mask);
947 index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
948 workload = hwmgr->workload_setting[index];
949 } else {
950 hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
951 index = fls(hwmgr->workload_mask);
952 index = index <= Workload_Policy_Max ? index - 1 : 0;
953 workload = hwmgr->workload_setting[index];
34bb2734
EH
954 }
955
558491dd
KF
956 if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
957 hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
a746c77e 958 if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
558491dd 959 return -EINVAL;
558491dd
KF
960 }
961
052fe96d
RZ
962 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
963 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
052fe96d 964
34bb2734
EH
965 return 0;
966}
967
6ab8555e
RZ
968static int pp_set_power_limit(void *handle, uint32_t limit)
969{
b905090d 970 struct pp_hwmgr *hwmgr = handle;
f7becf9a 971 uint32_t max_power_limit;
6ab8555e 972
ba8ab90e
RZ
973 if (!hwmgr || !hwmgr->pm_en)
974 return -EINVAL;
6ab8555e 975
6ab8555e 976 if (hwmgr->hwmgr_func->set_power_limit == NULL) {
527aa2a0 977 pr_info_ratelimited("%s was not implemented.\n", __func__);
6ab8555e
RZ
978 return -EINVAL;
979 }
980
981 if (limit == 0)
982 limit = hwmgr->default_power_limit;
983
f7becf9a
JG
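 /* With overdrive enabled, the limit may be raised by up to
 * TDPODLimit percent above the default power limit.
 */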
984 max_power_limit = hwmgr->default_power_limit;
985 if (hwmgr->od_enabled) {
986 max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
987 max_power_limit /= 100;
988 }
989
990 if (limit > max_power_limit)
6ab8555e
RZ
991 return -EINVAL;
992
6ab8555e
RZ
993 hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
994 hwmgr->power_limit = limit;
ba8ab90e 995 return 0;
6ab8555e
RZ
996}
997
6e58941c 998static int pp_get_power_limit(void *handle, uint32_t *limit,
04bec521
DP
999 enum pp_power_limit_level pp_limit_level,
1000 enum pp_power_type power_type)
6ab8555e 1001{
b905090d 1002 struct pp_hwmgr *hwmgr = handle;
04bec521 1003 int ret = 0;
6ab8555e 1004
37d67a7a 1005 if (!hwmgr || !hwmgr->pm_en || !limit)
6ab8555e
RZ
1006 return -EINVAL;
1007
04bec521
DP
1008 if (power_type != PP_PWR_TYPE_SUSTAINED)
1009 return -EOPNOTSUPP;
1010
04bec521
DP
1011 switch (pp_limit_level) {
1012 case PP_PWR_LIMIT_CURRENT:
1013 *limit = hwmgr->power_limit;
1014 break;
1015 case PP_PWR_LIMIT_DEFAULT:
1016 *limit = hwmgr->default_power_limit;
1017 break;
1018 case PP_PWR_LIMIT_MAX:
1019 *limit = hwmgr->default_power_limit;
6e58941c 1020 if (hwmgr->od_enabled) {
04bec521
DP
1021 *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1022 *limit /= 100;
6e58941c 1023 }
04bec521 1024 break;
42ef3137
MJ
1025 case PP_PWR_LIMIT_MIN:
1026 *limit = 0;
1027 break;
04bec521
DP
1028 default:
1029 ret = -EOPNOTSUPP;
1030 break;
f7becf9a 1031 }
6ab8555e 1032
04bec521 1033 return ret;
6ab8555e
RZ
1034}
1035
f685d714 1036static int pp_display_configuration_change(void *handle,
155f1127 1037 const struct amd_pp_display_configuration *display_config)
7fb72a1f 1038{
b905090d 1039 struct pp_hwmgr *hwmgr = handle;
7fb72a1f 1040
ba8ab90e
RZ
1041 if (!hwmgr || !hwmgr->pm_en)
1042 return -EINVAL;
7fb72a1f
RZ
1043
1044 phm_store_dal_configuration_data(hwmgr, display_config);
1045 return 0;
1046}
c4dd206b 1047
f685d714 1048static int pp_get_display_power_level(void *handle,
47329134 1049 struct amd_pp_simple_clock_info *output)
c4dd206b 1050{
b905090d 1051 struct pp_hwmgr *hwmgr = handle;
c4dd206b 1052
37d67a7a 1053 if (!hwmgr || !hwmgr->pm_en || !output)
1c863802 1054 return -EINVAL;
ba5f884c 1055
a746c77e 1056 return phm_get_dal_power_level(hwmgr, output);
c4dd206b 1057}
e273b041 1058
f685d714 1059static int pp_get_current_clocks(void *handle,
155f1127 1060 struct amd_pp_clock_info *clocks)
e273b041 1061{
97e8f102 1062 struct amd_pp_simple_clock_info simple_clocks = { 0 };
e273b041 1063 struct pp_clock_info hw_clocks;
b905090d 1064 struct pp_hwmgr *hwmgr = handle;
1c863802 1065 int ret = 0;
e273b041 1066
ba8ab90e
RZ
1067 if (!hwmgr || !hwmgr->pm_en)
1068 return -EINVAL;
e273b041
RZ
1069
1070 phm_get_dal_power_level(hwmgr, &simple_clocks);
1071
2a507105
RZ
1072 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1073 PHM_PlatformCaps_PowerContainment))
1074 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1075 &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
1076 else
1077 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1078 &hw_clocks, PHM_PerformanceLevelDesignation_Activity);
1079
ae97988f 1080 if (ret) {
9ed9203c 1081 pr_debug("Error in phm_get_clock_info \n");
2a507105 1082 return -EINVAL;
e273b041
RZ
1083 }
1084
1085 clocks->min_engine_clock = hw_clocks.min_eng_clk;
1086 clocks->max_engine_clock = hw_clocks.max_eng_clk;
1087 clocks->min_memory_clock = hw_clocks.min_mem_clk;
1088 clocks->max_memory_clock = hw_clocks.max_mem_clk;
1089 clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1090 clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1091
1092 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1093 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1094
97e8f102
RZ
1095 if (simple_clocks.level == 0)
1096 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1097 else
1098 clocks->max_clocks_state = simple_clocks.level;
e273b041
RZ
1099
1100 if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1101 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1102 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1103 }
e273b041 1104 return 0;
e273b041
RZ
1105}
1106
f685d714 1107static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
e273b041 1108{
b905090d 1109 struct pp_hwmgr *hwmgr = handle;
e273b041 1110
ba8ab90e
RZ
1111 if (!hwmgr || !hwmgr->pm_en)
1112 return -EINVAL;
1c863802 1113
fa9e6991 1114 if (clocks == NULL)
e273b041
RZ
1115 return -EINVAL;
1116
a746c77e 1117 return phm_get_clock_by_type(hwmgr, type, clocks);
e273b041
RZ
1118}
1119
f685d714 1120static int pp_get_clock_by_type_with_latency(void *handle,
d0187727
EH
1121 enum amd_pp_clock_type type,
1122 struct pp_clock_levels_with_latency *clocks)
1123{
b905090d 1124 struct pp_hwmgr *hwmgr = handle;
d0187727 1125
37d67a7a 1126 if (!hwmgr || !hwmgr->pm_en || !clocks)
d0187727
EH
1127 return -EINVAL;
1128
a746c77e 1129 return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
d0187727
EH
1130}
1131
f685d714 1132static int pp_get_clock_by_type_with_voltage(void *handle,
d0187727
EH
1133 enum amd_pp_clock_type type,
1134 struct pp_clock_levels_with_voltage *clocks)
1135{
b905090d 1136 struct pp_hwmgr *hwmgr = handle;
d0187727 1137
37d67a7a 1138 if (!hwmgr || !hwmgr->pm_en || !clocks)
d0187727
EH
1139 return -EINVAL;
1140
a746c77e 1141 return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
d0187727
EH
1142}
1143
f685d714 1144static int pp_set_watermarks_for_clocks_ranges(void *handle,
99c5e27d 1145 void *clock_ranges)
d0187727 1146{
b905090d 1147 struct pp_hwmgr *hwmgr = handle;
d0187727 1148
99c5e27d 1149 if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
d0187727
EH
1150 return -EINVAL;
1151
a746c77e
EQ
1152 return phm_set_watermarks_for_clocks_ranges(hwmgr,
1153 clock_ranges);
d0187727
EH
1154}
1155
f685d714 1156static int pp_display_clock_voltage_request(void *handle,
d0187727
EH
1157 struct pp_display_clock_request *clock)
1158{
b905090d 1159 struct pp_hwmgr *hwmgr = handle;
d0187727 1160
37d67a7a 1161 if (!hwmgr || !hwmgr->pm_en || !clock)
d0187727
EH
1162 return -EINVAL;
1163
a746c77e 1164 return phm_display_clock_voltage_request(hwmgr, clock);
d0187727
EH
1165}
1166
f685d714 1167static int pp_get_display_mode_validation_clocks(void *handle,
155f1127 1168 struct amd_pp_simple_clock_info *clocks)
e273b041 1169{
b905090d 1170 struct pp_hwmgr *hwmgr = handle;
1c863802 1171 int ret = 0;
e273b041 1172
37d67a7a 1173 if (!hwmgr || !hwmgr->pm_en || !clocks)
1c863802 1174 return -EINVAL;
ba5f884c 1175
97e8f102
RZ
1176 clocks->level = PP_DAL_POWERLEVEL_7;
1177
e273b041 1178 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1c863802 1179 ret = phm_get_max_high_clocks(hwmgr, clocks);
e273b041 1180
1c863802 1181 return ret;
e273b041
RZ
1182}
1183
a8da8ff3 1184static int pp_dpm_powergate_mmhub(void *handle)
72d76191 1185{
b905090d 1186 struct pp_hwmgr *hwmgr = handle;
72d76191 1187
ba8ab90e
RZ
1188 if (!hwmgr || !hwmgr->pm_en)
1189 return -EINVAL;
72d76191 1190
a8da8ff3 1191 if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
527aa2a0 1192 pr_info_ratelimited("%s was not implemented.\n", __func__);
72d76191
EH
1193 return 0;
1194 }
1195
a8da8ff3 1196 return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
72d76191
EH
1197}
1198
85f80cb3
RZ
1199static int pp_dpm_powergate_gfx(void *handle, bool gate)
1200{
1201 struct pp_hwmgr *hwmgr = handle;
1202
1203 if (!hwmgr || !hwmgr->pm_en)
1204 return 0;
1205
1206 if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
527aa2a0 1207 pr_info_ratelimited("%s was not implemented.\n", __func__);
85f80cb3
RZ
1208 return 0;
1209 }
1210
1211 return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1212}
1213
982976d9
RZ
1214static void pp_dpm_powergate_acp(void *handle, bool gate)
1215{
1216 struct pp_hwmgr *hwmgr = handle;
1217
1218 if (!hwmgr || !hwmgr->pm_en)
1219 return;
1220
1221 if (hwmgr->hwmgr_func->powergate_acp == NULL) {
527aa2a0 1222 pr_info_ratelimited("%s was not implemented.\n", __func__);
982976d9
RZ
1223 return;
1224 }
1225
1226 hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1227}
1228
40bea02f
RZ
1229static void pp_dpm_powergate_sdma(void *handle, bool gate)
1230{
1231 struct pp_hwmgr *hwmgr = handle;
1232
1233 if (!hwmgr)
1234 return;
1235
1236 if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
527aa2a0 1237 pr_info_ratelimited("%s was not implemented.\n", __func__);
40bea02f
RZ
1238 return;
1239 }
1240
1241 hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1242}
1243
b92c6287
RZ
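/*
 * Dispatch per-IP powergating requests from the common amdgpu code to the
 * matching powerplay helper for that IP block.
 */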
1244static int pp_set_powergating_by_smu(void *handle,
1245 uint32_t block_type, bool gate)
1246{
1247 int ret = 0;
1248
1249 switch (block_type) {
1250 case AMD_IP_BLOCK_TYPE_UVD:
1251 case AMD_IP_BLOCK_TYPE_VCN:
1252 pp_dpm_powergate_uvd(handle, gate);
1253 break;
1254 case AMD_IP_BLOCK_TYPE_VCE:
1255 pp_dpm_powergate_vce(handle, gate);
1256 break;
1257 case AMD_IP_BLOCK_TYPE_GMC:
17252701
EQ
1258 /*
1259 * For now, this is only used on PICASSO.
 1260 * Only the "gate" operation is supported.
1261 */
1262 if (gate)
1263 pp_dpm_powergate_mmhub(handle);
b92c6287
RZ
1264 break;
1265 case AMD_IP_BLOCK_TYPE_GFX:
85f80cb3 1266 ret = pp_dpm_powergate_gfx(handle, gate);
b92c6287 1267 break;
982976d9
RZ
1268 case AMD_IP_BLOCK_TYPE_ACP:
1269 pp_dpm_powergate_acp(handle, gate);
1270 break;
40bea02f
RZ
1271 case AMD_IP_BLOCK_TYPE_SDMA:
1272 pp_dpm_powergate_sdma(handle, gate);
1273 break;
b92c6287
RZ
1274 default:
1275 break;
1276 }
1277 return ret;
1278}
1279
ea870e44
RZ
1280static int pp_notify_smu_enable_pwe(void *handle)
1281{
1282 struct pp_hwmgr *hwmgr = handle;
1283
1284 if (!hwmgr || !hwmgr->pm_en)
0d7f824b 1285 return -EINVAL;
ea870e44
RZ
1286
1287 if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
527aa2a0 1288 pr_info_ratelimited("%s was not implemented.\n", __func__);
2a782140 1289 return -EINVAL;
ea870e44
RZ
1290 }
1291
ea870e44 1292 hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
ea870e44
RZ
1293
1294 return 0;
1295}
1296
b55c9e7a
EQ
1297static int pp_enable_mgpu_fan_boost(void *handle)
1298{
1299 struct pp_hwmgr *hwmgr = handle;
1300
5be3bb6e 1301 if (!hwmgr)
b55c9e7a
EQ
1302 return -EINVAL;
1303
5be3bb6e
EQ
1304 if (!hwmgr->pm_en ||
1305 hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
b55c9e7a 1306 return 0;
b55c9e7a 1307
b55c9e7a 1308 hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
b55c9e7a
EQ
1309
1310 return 0;
1311}
1312
9ed9203c 1313static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1314{
1315 struct pp_hwmgr *hwmgr = handle;
1316
1317 if (!hwmgr || !hwmgr->pm_en)
1318 return -EINVAL;
1319
1320 if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1321 pr_debug("%s was not implemented.\n", __func__);
2a782140 1322 return -EINVAL;
9ed9203c 1323 }
1324
9ed9203c 1325 hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
9ed9203c 1326
1327 return 0;
1328}
1329
1330static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1331{
1332 struct pp_hwmgr *hwmgr = handle;
1333
1334 if (!hwmgr || !hwmgr->pm_en)
1335 return -EINVAL;
1336
1337 if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1338 pr_debug("%s was not implemented.\n", __func__);
2a782140 1339 return -EINVAL;
9ed9203c 1340 }
1341
9ed9203c 1342 hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
9ed9203c 1343
1344 return 0;
1345}
1346
1347static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1348{
1349 struct pp_hwmgr *hwmgr = handle;
1350
1351 if (!hwmgr || !hwmgr->pm_en)
1352 return -EINVAL;
1353
1354 if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1355 pr_debug("%s was not implemented.\n", __func__);
2a782140 1356 return -EINVAL;
9ed9203c 1357 }
1358
9ed9203c 1359 hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
9ed9203c 1360
1361 return 0;
1362}
1363
1364static int pp_set_active_display_count(void *handle, uint32_t count)
1365{
1366 struct pp_hwmgr *hwmgr = handle;
9ed9203c 1367
1368 if (!hwmgr || !hwmgr->pm_en)
1369 return -EINVAL;
1370
a746c77e 1371 return phm_set_active_display_count(hwmgr, count);
9ed9203c 1372}
1373
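/* BACO (Bus Active, Chip Off) interfaces: reported only on bare metal with DPM enabled. */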
fbbcb3f2 1374static bool pp_get_asic_baco_capability(void *handle)
425db255
JQ
1375{
1376 struct pp_hwmgr *hwmgr = handle;
1377
1378 if (!hwmgr)
fbbcb3f2 1379 return false;
425db255 1380
c7833d33 1381 if (!(hwmgr->not_vf && amdgpu_dpm) ||
1b199594 1382 !hwmgr->hwmgr_func->get_bamaco_support)
fbbcb3f2 1383 return false;
425db255 1384
1b199594 1385 return hwmgr->hwmgr_func->get_bamaco_support(hwmgr);
425db255
JQ
1386}
1387
1388static int pp_get_asic_baco_state(void *handle, int *state)
1389{
1390 struct pp_hwmgr *hwmgr = handle;
1391
1392 if (!hwmgr)
1393 return -EINVAL;
1394
a1cd1289 1395 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
425db255
JQ
1396 return 0;
1397
425db255 1398 hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
425db255
JQ
1399
1400 return 0;
1401}
1402
1403static int pp_set_asic_baco_state(void *handle, int state)
1404{
1405 struct pp_hwmgr *hwmgr = handle;
1406
1407 if (!hwmgr)
1408 return -EINVAL;
1409
c7833d33
TZ
1410 if (!(hwmgr->not_vf && amdgpu_dpm) ||
1411 !hwmgr->hwmgr_func->set_asic_baco_state)
425db255
JQ
1412 return 0;
1413
425db255 1414 hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
425db255
JQ
1415
1416 return 0;
1417}
1418
7ca881a8
EQ
1419static int pp_get_ppfeature_status(void *handle, char *buf)
1420{
1421 struct pp_hwmgr *hwmgr = handle;
7ca881a8
EQ
1422
1423 if (!hwmgr || !hwmgr->pm_en || !buf)
1424 return -EINVAL;
1425
1426 if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1427 pr_info_ratelimited("%s was not implemented.\n", __func__);
1428 return -EINVAL;
1429 }
1430
a746c77e 1431 return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
7ca881a8
EQ
1432}
1433
1434static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1435{
1436 struct pp_hwmgr *hwmgr = handle;
7ca881a8
EQ
1437
1438 if (!hwmgr || !hwmgr->pm_en)
1439 return -EINVAL;
1440
1441 if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1442 pr_info_ratelimited("%s was not implemented.\n", __func__);
1443 return -EINVAL;
1444 }
1445
a746c77e 1446 return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
7ca881a8
EQ
1447}
1448
e97204ea
AG
1449static int pp_asic_reset_mode_2(void *handle)
1450{
1451 struct pp_hwmgr *hwmgr = handle;
e97204ea
AG
1452
1453 if (!hwmgr || !hwmgr->pm_en)
1454 return -EINVAL;
1455
1456 if (hwmgr->hwmgr_func->asic_reset == NULL) {
1457 pr_info_ratelimited("%s was not implemented.\n", __func__);
1458 return -EINVAL;
1459 }
1460
a746c77e 1461 return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
e97204ea
AG
1462}
1463
6acaa6af
AG
1464static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1465{
1466 struct pp_hwmgr *hwmgr = handle;
1467
1468 if (!hwmgr || !hwmgr->pm_en)
1469 return -EINVAL;
1470
1471 if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1472 pr_info_ratelimited("%s was not implemented.\n", __func__);
1473 return -EINVAL;
1474 }
1475
a746c77e 1476 return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
6acaa6af
AG
1477}
1478
06615f9a
EQ
1479static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1480{
1481 struct pp_hwmgr *hwmgr = handle;
1482
1483 if (!hwmgr)
1484 return -EINVAL;
1485
1486 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1487 return 0;
1488
06615f9a 1489 hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
06615f9a
EQ
1490
1491 return 0;
1492}
1493
3e454860
EQ
1494static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1495{
1496 struct pp_hwmgr *hwmgr = handle;
1497
1498 if (!hwmgr)
1499 return -EINVAL;
1500
1501 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1502 return 0;
1503
3e454860 1504 hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
3e454860
EQ
1505
1506 return 0;
1507}
1508
0b01b830
EQ
1509static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1510{
1511 struct pp_hwmgr *hwmgr = handle;
0b01b830
EQ
1512
1513 if (!hwmgr)
1514 return -EINVAL;
1515
1516 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1517 return -EOPNOTSUPP;
1518
a746c77e 1519 return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
0b01b830
EQ
1520}
1521
d90a53d6
PL
1522static int pp_gfx_state_change_set(void *handle, uint32_t state)
1523{
1524 struct pp_hwmgr *hwmgr = handle;
1525
1526 if (!hwmgr || !hwmgr->pm_en)
1527 return -EINVAL;
1528
1529 if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
1530 pr_info_ratelimited("%s was not implemented.\n", __func__);
1531 return -EINVAL;
1532 }
1533
d90a53d6 1534 hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
d90a53d6
PL
1535 return 0;
1536}
1537
b8c78bdb
LL
1538static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
1539{
1540 struct pp_hwmgr *hwmgr = handle;
1541 struct amdgpu_device *adev = hwmgr->adev;
629c30db 1542 int err;
b8c78bdb
LL
1543
1544 if (!addr || !size)
1545 return -EINVAL;
1546
1547 *addr = NULL;
1548 *size = 0;
b8c78bdb 1549 if (adev->pm.smu_prv_buffer) {
629c30db
LZ
1550 err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
1551 if (err)
1552 return err;
b8c78bdb
LL
1553 *size = adev->pm.smu_prv_buffer_size;
1554 }
b8c78bdb
LL
1555
1556 return 0;
1557}
1558
6ddbd37f
EQ
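/*
 * Legacy (non-DC) display path: collect the active display configuration,
 * push it to powerplay and dispatch a DISPLAY_CONFIG_CHANGE task so the
 * power state is re-evaluated.
 */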
1559static void pp_pm_compute_clocks(void *handle)
1560{
1561 struct pp_hwmgr *hwmgr = handle;
1562 struct amdgpu_device *adev = hwmgr->adev;
6ddbd37f 1563
d09ef243 1564 if (!adev->dc_enabled) {
6ddbd37f
EQ
1565 amdgpu_dpm_get_active_displays(adev);
1566 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1567 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1568 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1569 /* we have issues with mclk switching with
1570 * refresh rates over 120 hz on the non-DC code.
1571 */
1572 if (adev->pm.pm_display_cfg.vrefresh > 120)
1573 adev->pm.pm_display_cfg.min_vblank_time = 0;
1574
1575 pp_display_configuration_change(handle,
1576 &adev->pm.pm_display_cfg);
1577 }
1578
1579 pp_dpm_dispatch_tasks(handle,
1580 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
1581 NULL);
1582}
1583
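/* Dispatch table wired into adev->powerplay.pp_funcs in amd_powerplay_create(). */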
b905090d 1584static const struct amd_pm_funcs pp_dpm_funcs = {
f685d714
RZ
1585 .load_firmware = pp_dpm_load_fw,
1586 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1587 .force_performance_level = pp_dpm_force_performance_level,
1588 .get_performance_level = pp_dpm_get_performance_level,
1589 .get_current_power_state = pp_dpm_get_current_power_state,
f685d714
RZ
1590 .dispatch_tasks = pp_dpm_dispatch_tasks,
1591 .set_fan_control_mode = pp_dpm_set_fan_control_mode,
1592 .get_fan_control_mode = pp_dpm_get_fan_control_mode,
0d8318e1
EQ
1593 .set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
1594 .get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
f685d714 1595 .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
c2870527 1596 .set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
f685d714
RZ
1597 .get_pp_num_states = pp_dpm_get_pp_num_states,
1598 .get_pp_table = pp_dpm_get_pp_table,
1599 .set_pp_table = pp_dpm_set_pp_table,
1600 .force_clock_level = pp_dpm_force_clock_level,
5d8539d2 1601 .emit_clock_levels = pp_dpm_emit_clock_levels,
f685d714
RZ
1602 .print_clock_levels = pp_dpm_print_clock_levels,
1603 .get_sclk_od = pp_dpm_get_sclk_od,
1604 .set_sclk_od = pp_dpm_set_sclk_od,
1605 .get_mclk_od = pp_dpm_get_mclk_od,
1606 .set_mclk_od = pp_dpm_set_mclk_od,
1607 .read_sensor = pp_dpm_read_sensor,
1608 .get_vce_clock_state = pp_dpm_get_vce_clock_state,
f685d714
RZ
1609 .switch_power_profile = pp_dpm_switch_power_profile,
1610 .set_clockgating_by_smu = pp_set_clockgating_by_smu,
b92c6287 1611 .set_powergating_by_smu = pp_set_powergating_by_smu,
6390258a
RZ
1612 .get_power_profile_mode = pp_get_power_profile_mode,
1613 .set_power_profile_mode = pp_set_power_profile_mode,
12a6727d 1614 .set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
e3933f26 1615 .odn_edit_dpm_table = pp_odn_edit_dpm_table,
a2c28e34 1616 .set_mp1_state = pp_dpm_set_mp1_state,
6ab8555e
RZ
1617 .set_power_limit = pp_set_power_limit,
1618 .get_power_limit = pp_get_power_limit,
f685d714
RZ
1619/* export to DC */
1620 .get_sclk = pp_dpm_get_sclk,
1621 .get_mclk = pp_dpm_get_mclk,
1622 .display_configuration_change = pp_display_configuration_change,
1623 .get_display_power_level = pp_get_display_power_level,
1624 .get_current_clocks = pp_get_current_clocks,
1625 .get_clock_by_type = pp_get_clock_by_type,
1626 .get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
1627 .get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
1628 .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
1629 .display_clock_voltage_request = pp_display_clock_voltage_request,
1630 .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
ea870e44 1631 .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
b55c9e7a 1632 .enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
9ed9203c 1633 .set_active_display_count = pp_set_active_display_count,
1634 .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
1635 .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
1636 .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
425db255
JQ
1637 .get_asic_baco_capability = pp_get_asic_baco_capability,
1638 .get_asic_baco_state = pp_get_asic_baco_state,
1639 .set_asic_baco_state = pp_set_asic_baco_state,
7ca881a8
EQ
1640 .get_ppfeature_status = pp_get_ppfeature_status,
1641 .set_ppfeature_status = pp_set_ppfeature_status,
e97204ea 1642 .asic_reset_mode_2 = pp_asic_reset_mode_2,
6acaa6af 1643 .smu_i2c_bus_access = pp_smu_i2c_bus_access,
06615f9a 1644 .set_df_cstate = pp_set_df_cstate,
3e454860 1645 .set_xgmi_pstate = pp_set_xgmi_pstate,
0b01b830 1646 .get_gpu_metrics = pp_get_gpu_metrics,
d90a53d6 1647 .gfx_state_change_set = pp_gfx_state_change_set,
b8c78bdb 1648 .get_smu_prv_buf_details = pp_get_prv_buffer_details,
6ddbd37f 1649 .pm_compute_clocks = pp_pm_compute_clocks,
f685d714 1650};