/* drivers/gpu/drm/amd/powerplay/amd_powerplay.c */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"


static const struct amd_pm_funcs pp_dpm_funcs;

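/*
 * amd_powerplay_create() allocates the pp_hwmgr instance for this device,
 * records the adev handle, SR-IOV state, chip family/id, feature mask and
 * display config pointer, then publishes the handle together with the
 * pp_dpm_funcs dispatch table (defined at the end of this file) through
 * adev->powerplay.pp_handle / pp_funcs.
 */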
static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->smu_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = adev->powerplay.pp_feature;
	hwmgr->display_config = &adev->pm.pm_display_cfg;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}


static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;

	kfree(hwmgr);
	hwmgr = NULL;
}

static int pp_early_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = handle;

	ret = amd_powerplay_create(adev);

	if (ret != 0)
		return ret;

	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}

static int pp_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = hwmgr_sw_init(hwmgr);

	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");

	return ret;
}

static int pp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_sw_fini(hwmgr);

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;

	return 0;
}

static int pp_hw_init(void *handle)
{
	int ret = 0;
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	ret = hwmgr_hw_init(hwmgr);

	if (ret)
		pr_err("powerplay hw init failed\n");

	return ret;
}

static int pp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_hw_fini(hwmgr);

	return 0;
}

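/*
 * pp_reserve_vram_for_smu() carves a kernel buffer of
 * adev->pm.smu_prv_buffer_size bytes out of the GTT domain and hands its
 * CPU and GPU addresses to the SMU via notify_cac_buffer_info(); if the
 * SMU cannot be notified, the buffer is freed again.
 */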
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &adev->pm.smu_prv_buffer,
				    &gpu_addr,
				    &cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}

static int pp_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (hwmgr && hwmgr->pm_en) {
		mutex_lock(&hwmgr->smu_lock);
		hwmgr_handle_task(hwmgr,
				  AMD_PP_TASK_COMPLETE_INIT, NULL);
		mutex_unlock(&hwmgr->smu_lock);
	}
	if (adev->pm.smu_prv_buffer_size != 0)
		pp_reserve_vram_for_smu(adev);

	return 0;
}

static void pp_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (adev->pm.smu_prv_buffer)
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
	amd_powerplay_destroy(adev);
}


static bool pp_is_idle(void *handle)
{
	return false;
}

static int pp_wait_for_idle(void *handle)
{
	return 0;
}

static int pp_sw_reset(void *handle)
{
	return 0;
}

static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

static int pp_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	return hwmgr_suspend(hwmgr);
}

static int pp_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	return hwmgr_resume(hwmgr);
}

static int pp_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};

const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

/* This interface is only supported on VI parts, because only the
 * smu7/8 SMU can help to load the gfx/sdma firmware. The SMU has to be
 * enabled before the firmware of the other IP blocks can be loaded,
 * so start the SMU here to load its own firmware and then the other
 * IPs' firmware.
 */
static int pp_dpm_load_fw(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("fw load failed\n");
		return -EINVAL;
	}

	return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

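/*
 * pp_dpm_en_umd_pstate() tracks transitions into and out of the UMD
 * "profile" pstates. On entry it saves the current DPM level and ungates
 * GFX clock/power gating so the requested fixed clocks stay stable; on
 * exit it restores the saved level (for PROFILE_EXIT) and re-enables GFX
 * clock/power gating.
 */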
static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_PG_STATE_GATE);
		}
	}
}

static int pp_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (level == hwmgr->dpm_level)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	enum amd_dpm_forced_level level;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	level = hwmgr->dpm_level;
	mutex_unlock(&hwmgr->smu_lock);
	return level;
}

static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t clk = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_sclk == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return clk;
}

static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t clk = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_mclk == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
	mutex_unlock(&hwmgr->smu_lock);
	return clk;
}

static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}

static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}

static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}
	mutex_unlock(&hwmgr->smu_lock);

	return pm_type;
}

static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}
	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
	mutex_unlock(&hwmgr->smu_lock);
}

static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t mode = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return mode;
}

static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
	struct pp_hwmgr *hwmgr = handle;
	int size = 0;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	*table = (char *)hwmgr->soft_pp_table;
	size = hwmgr->soft_pp_table_size;
	mutex_unlock(&hwmgr->smu_lock);
	return size;
}

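/*
 * amd_powerplay_reset() tears the hardware state down and brings it back
 * up again (hwmgr_hw_fini() followed by hwmgr_hw_init()), then replays the
 * AMD_PP_TASK_COMPLETE_INIT task so the power state is re-adjusted. It is
 * used by pp_dpm_set_pp_table() to apply a newly uploaded table.
 */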
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}

static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			goto err;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		goto err;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			goto err;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
err:
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_info("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_sclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_mclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

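/*
 * pp_dpm_read_sensor() answers the stable-pstate clock and fan RPM range
 * queries directly from values cached in the hwmgr, without taking
 * smu_lock; every other sensor is forwarded to the ASIC-specific
 * read_sensor() callback under the lock.
 */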
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		mutex_lock(&hwmgr->smu_lock);
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
		mutex_unlock(&hwmgr->smu_lock);
		return ret;
	}
}

static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return NULL;

	if (idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
	return NULL;
}

static int pp_get_power_profile_mode(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !buf)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return snprintf(buf, PAGE_SIZE, "\n");
	}

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -EINVAL;

	if (!hwmgr || !hwmgr->pm_en)
		return ret;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return ret;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_info("power profile setting is for manual dpm mode only.\n");
		return ret;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

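/*
 * pp_dpm_switch_power_profile() keeps a bitmask of the currently requested
 * workload types. Enabling a profile sets its priority bit, disabling one
 * clears it, and the highest-priority remaining bit (found with fls())
 * selects the workload setting that is pushed to the SMU, unless the user
 * has forced manual DPM mode.
 */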
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

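/*
 * pp_set_power_limit() clamps the requested power cap to the board default,
 * optionally extended by the TDP overdrive percentage when overdrive is
 * enabled; a request of 0 restores the default limit.
 * pp_get_power_limit() reports either the current or the default limit, the
 * latter scaled by the same overdrive headroom.
 */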
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled) {
		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
		max_power_limit /= 100;
	}

	if (limit > max_power_limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (default_limit) {
		*limit = hwmgr->default_power_limit;
		if (hwmgr->od_enabled) {
			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
			*limit /= 100;
		}
	}
	else
		*limit = hwmgr->power_limit;

	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

static int pp_display_configuration_change(void *handle,
	const struct amd_pp_display_configuration *display_config)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	phm_store_dal_configuration_data(hwmgr, display_config);
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_get_display_power_level(void *handle,
		struct amd_pp_simple_clock_info *output)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !output)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_dal_power_level(hwmgr, output);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_info("Error in phm_get_clock_info \n");
		mutex_unlock(&hwmgr->smu_lock);
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}

static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (clocks == NULL)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_get_clock_by_type_with_latency(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_get_clock_by_type_with_voltage(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);

	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_set_watermarks_for_clocks_ranges(void *handle,
		void *clock_ranges)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
			clock_ranges);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

static int pp_display_clock_voltage_request(void *handle,
		struct pp_display_clock_request *clock)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !clock)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	ret = phm_display_clock_voltage_request(hwmgr, clock);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}

static int pp_get_display_mode_validation_clocks(void *handle,
		struct amd_pp_simple_clock_info *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	clocks->level = PP_DAL_POWERLEVEL_7;

	mutex_lock(&hwmgr->smu_lock);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
		ret = phm_get_max_high_clocks(hwmgr, clocks);

	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_powergate_mmhub(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
}

static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}

static void pp_dpm_powergate_acp(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}

static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return;

	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}

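/*
 * pp_set_powergating_by_smu() is the single powergating entry point used by
 * amdgpu; it fans the request out to the per-IP helpers above based on the
 * block type (UVD/VCN, VCE, GMC/mmhub, GFX, ACP, SDMA) and silently ignores
 * blocks powerplay does not handle.
 */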
static int pp_set_powergating_by_smu(void *handle,
				uint32_t block_type, bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		pp_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		pp_dpm_powergate_vce(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
		pp_dpm_powergate_mmhub(handle);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = pp_dpm_powergate_gfx(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_ACP:
		pp_dpm_powergate_acp(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		pp_dpm_powergate_sdma(handle, gate);
		break;
	default:
		break;
	}
	return ret;
}

static int pp_notify_smu_enable_pwe(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

static int pp_enable_mgpu_fan_boost(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en ||
	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

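/*
 * pp_dpm_funcs is the amd_pm_funcs dispatch table that amd_powerplay_create()
 * installs in adev->powerplay.pp_funcs; the entries below the "export to DC"
 * marker are the ones consumed by the display core.
 */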
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
};