drm/amd/pm: Add missing mutex for pp_get_power_profile_mode
[linux-block.git] / drivers / gpu / drm / amd / pm / powerplay / amd_powerplay.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
32 #include "amdgpu.h"
33 #include "hwmgr.h"
34
35
36 static const struct amd_pm_funcs pp_dpm_funcs;
37
/*
 * Allocate and minimally initialize the powerplay hwmgr instance and
 * attach it to @adev (pp_handle + pp_funcs).
 *
 * Returns 0 on success, -EINVAL for a NULL device, -ENOMEM on allocation
 * failure.  The instance is torn down by amd_powerplay_destroy().
 */
static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	/* NOTE(review): amdgpu_cgs_create_device() result is not checked;
	 * presumably later init paths tolerate a NULL cgs device — confirm. */
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->smu_lock);
	mutex_init(&hwmgr->msg_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	/* Feature mask comes from the amdgpu ppfeaturemask module parameter. */
	hwmgr->feature_mask = adev->pm.pp_feature;
	hwmgr->display_config = &adev->pm.pm_display_cfg;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}
62
63
64 static void amd_powerplay_destroy(struct amdgpu_device *adev)
65 {
66         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
67
68         mutex_destroy(&hwmgr->msg_lock);
69
70         kfree(hwmgr->hardcode_pp_table);
71         hwmgr->hardcode_pp_table = NULL;
72
73         kfree(hwmgr);
74         hwmgr = NULL;
75 }
76
77 static int pp_early_init(void *handle)
78 {
79         int ret;
80         struct amdgpu_device *adev = handle;
81
82         ret = amd_powerplay_create(adev);
83
84         if (ret != 0)
85                 return ret;
86
87         ret = hwmgr_early_init(adev->powerplay.pp_handle);
88         if (ret)
89                 return -EINVAL;
90
91         return 0;
92 }
93
94 static int pp_sw_init(void *handle)
95 {
96         struct amdgpu_device *adev = handle;
97         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
98         int ret = 0;
99
100         ret = hwmgr_sw_init(hwmgr);
101
102         pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
103
104         return ret;
105 }
106
107 static int pp_sw_fini(void *handle)
108 {
109         struct amdgpu_device *adev = handle;
110         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
111
112         hwmgr_sw_fini(hwmgr);
113
114         release_firmware(adev->pm.fw);
115         adev->pm.fw = NULL;
116
117         return 0;
118 }
119
120 static int pp_hw_init(void *handle)
121 {
122         int ret = 0;
123         struct amdgpu_device *adev = handle;
124         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
125
126         ret = hwmgr_hw_init(hwmgr);
127
128         if (ret)
129                 pr_err("powerplay hw init failed\n");
130
131         return ret;
132 }
133
134 static int pp_hw_fini(void *handle)
135 {
136         struct amdgpu_device *adev = handle;
137         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
138
139         hwmgr_hw_fini(hwmgr);
140
141         return 0;
142 }
143
/*
 * Reserve a GTT buffer of adev->pm.smu_prv_buffer_size bytes for the SMU's
 * private use and report its CPU/GPU addresses via notify_cac_buffer_info.
 *
 * On any failure the buffer is released again and an error is logged; the
 * function is best-effort and returns nothing.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	/* r defaults to -EINVAL so a missing notify_cac_buffer_info hook
	 * also takes the cleanup path below. */
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		/* SMU was not told about the buffer, so it must not stay
		 * allocated. */
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
174
175 static int pp_late_init(void *handle)
176 {
177         struct amdgpu_device *adev = handle;
178         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
179
180         if (hwmgr && hwmgr->pm_en) {
181                 mutex_lock(&hwmgr->smu_lock);
182                 hwmgr_handle_task(hwmgr,
183                                         AMD_PP_TASK_COMPLETE_INIT, NULL);
184                 mutex_unlock(&hwmgr->smu_lock);
185         }
186         if (adev->pm.smu_prv_buffer_size != 0)
187                 pp_reserve_vram_for_smu(adev);
188
189         return 0;
190 }
191
192 static void pp_late_fini(void *handle)
193 {
194         struct amdgpu_device *adev = handle;
195
196         if (adev->pm.smu_prv_buffer)
197                 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
198         amd_powerplay_destroy(adev);
199 }
200
201
202 static bool pp_is_idle(void *handle)
203 {
204         return false;
205 }
206
/* amd_ip_funcs.wait_for_idle stub: nothing to wait for. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
211
/* amd_ip_funcs.soft_reset stub: no soft-reset support. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
216
217 static int pp_set_powergating_state(void *handle,
218                                     enum amd_powergating_state state)
219 {
220         return 0;
221 }
222
223 static int pp_suspend(void *handle)
224 {
225         struct amdgpu_device *adev = handle;
226         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
227
228         return hwmgr_suspend(hwmgr);
229 }
230
231 static int pp_resume(void *handle)
232 {
233         struct amdgpu_device *adev = handle;
234         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
235
236         return hwmgr_resume(hwmgr);
237 }
238
239 static int pp_set_clockgating_state(void *handle,
240                                           enum amd_clockgating_state state)
241 {
242         return 0;
243 }
244
/* amd_ip_funcs vtable wiring the powerplay IP block into the amdgpu
 * IP-block lifecycle (init/fini/suspend/resume/idle hooks above). */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};
262
/* Exported IP block descriptor: registers powerplay as the SMC IP,
 * version 1.0.0, using the pp_ip_funcs vtable above. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
271
272 /* This interface only be supported On Vi,
273  * because only smu7/8 can help to load gfx/sdma fw,
274  * smu need to be enabled before load other ip's fw.
275  * so call start smu to load smu7 fw and other ip's fw
276  */
277 static int pp_dpm_load_fw(void *handle)
278 {
279         struct pp_hwmgr *hwmgr = handle;
280
281         if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
282                 return -EINVAL;
283
284         if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
285                 pr_err("fw load failed\n");
286                 return -EINVAL;
287         }
288
289         return 0;
290 }
291
/* Firmware-loading-complete notification stub: nothing to do here. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
296
297 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
298 {
299         struct pp_hwmgr *hwmgr = handle;
300
301         if (!hwmgr || !hwmgr->pm_en)
302                 return -EINVAL;
303
304         if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
305                 pr_info_ratelimited("%s was not implemented.\n", __func__);
306                 return 0;
307         }
308
309         return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
310 }
311
/*
 * Handle UMD pstate entry/exit around a forced-level change.
 *
 * Entering a profile level saves the current dpm level and ungates GFX
 * power/clock gating; leaving restores the saved level (for PROFILE_EXIT)
 * and re-gates.  May rewrite *level in the PROFILE_EXIT case.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			/* PG is lifted before CG on entry; exit below uses the
			 * mirrored order (CG first, then PG). */
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}
347
/*
 * Force a dpm performance level.
 *
 * No-op when the requested level is already active.  Otherwise, under
 * smu_lock, handles UMD pstate entry/exit (which may rewrite @level for
 * PROFILE_EXIT), records the request and triggers a power-state readjust.
 */
static int pp_dpm_force_performance_level(void *handle,
					enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (level == hwmgr->dpm_level)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
367
/*
 * Return the current forced dpm level, read under smu_lock.
 *
 * NOTE(review): on error this returns -EINVAL coerced into the enum type;
 * callers apparently treat negative values as errors — confirm.
 */
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	enum amd_dpm_forced_level level;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	level = hwmgr->dpm_level;
	mutex_unlock(&hwmgr->smu_lock);
	return level;
}
382
383 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
384 {
385         struct pp_hwmgr *hwmgr = handle;
386         uint32_t clk = 0;
387
388         if (!hwmgr || !hwmgr->pm_en)
389                 return 0;
390
391         if (hwmgr->hwmgr_func->get_sclk == NULL) {
392                 pr_info_ratelimited("%s was not implemented.\n", __func__);
393                 return 0;
394         }
395         mutex_lock(&hwmgr->smu_lock);
396         clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
397         mutex_unlock(&hwmgr->smu_lock);
398         return clk;
399 }
400
401 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
402 {
403         struct pp_hwmgr *hwmgr = handle;
404         uint32_t clk = 0;
405
406         if (!hwmgr || !hwmgr->pm_en)
407                 return 0;
408
409         if (hwmgr->hwmgr_func->get_mclk == NULL) {
410                 pr_info_ratelimited("%s was not implemented.\n", __func__);
411                 return 0;
412         }
413         mutex_lock(&hwmgr->smu_lock);
414         clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
415         mutex_unlock(&hwmgr->smu_lock);
416         return clk;
417 }
418
419 static void pp_dpm_powergate_vce(void *handle, bool gate)
420 {
421         struct pp_hwmgr *hwmgr = handle;
422
423         if (!hwmgr || !hwmgr->pm_en)
424                 return;
425
426         if (hwmgr->hwmgr_func->powergate_vce == NULL) {
427                 pr_info_ratelimited("%s was not implemented.\n", __func__);
428                 return;
429         }
430         mutex_lock(&hwmgr->smu_lock);
431         hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
432         mutex_unlock(&hwmgr->smu_lock);
433 }
434
435 static void pp_dpm_powergate_uvd(void *handle, bool gate)
436 {
437         struct pp_hwmgr *hwmgr = handle;
438
439         if (!hwmgr || !hwmgr->pm_en)
440                 return;
441
442         if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
443                 pr_info_ratelimited("%s was not implemented.\n", __func__);
444                 return;
445         }
446         mutex_lock(&hwmgr->smu_lock);
447         hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
448         mutex_unlock(&hwmgr->smu_lock);
449 }
450
451 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
452                 enum amd_pm_state_type *user_state)
453 {
454         int ret = 0;
455         struct pp_hwmgr *hwmgr = handle;
456
457         if (!hwmgr || !hwmgr->pm_en)
458                 return -EINVAL;
459
460         mutex_lock(&hwmgr->smu_lock);
461         ret = hwmgr_handle_task(hwmgr, task_id, user_state);
462         mutex_unlock(&hwmgr->smu_lock);
463
464         return ret;
465 }
466
/*
 * Map the current power state's UI classification onto the generic
 * amd_pm_state_type.  States without a Battery/Balanced/Performance label
 * fall back to INTERNAL_BOOT (boot-flagged) or DEFAULT.  Read under
 * smu_lock so current_ps cannot change mid-translation.
 */
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}
	mutex_unlock(&hwmgr->smu_lock);

	return pm_type;
}
501
502 static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
503 {
504         struct pp_hwmgr *hwmgr = handle;
505
506         if (!hwmgr || !hwmgr->pm_en)
507                 return;
508
509         if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
510                 pr_info_ratelimited("%s was not implemented.\n", __func__);
511                 return;
512         }
513         mutex_lock(&hwmgr->smu_lock);
514         hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
515         mutex_unlock(&hwmgr->smu_lock);
516 }
517
518 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
519 {
520         struct pp_hwmgr *hwmgr = handle;
521         uint32_t mode = 0;
522
523         if (!hwmgr || !hwmgr->pm_en)
524                 return 0;
525
526         if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
527                 pr_info_ratelimited("%s was not implemented.\n", __func__);
528                 return 0;
529         }
530         mutex_lock(&hwmgr->smu_lock);
531         mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
532         mutex_unlock(&hwmgr->smu_lock);
533         return mode;
534 }
535
536 static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
537 {
538         struct pp_hwmgr *hwmgr = handle;
539         int ret = 0;
540
541         if (!hwmgr || !hwmgr->pm_en)
542                 return -EINVAL;
543
544         if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL) {
545                 pr_info_ratelimited("%s was not implemented.\n", __func__);
546                 return 0;
547         }
548         mutex_lock(&hwmgr->smu_lock);
549         ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
550         mutex_unlock(&hwmgr->smu_lock);
551         return ret;
552 }
553
554 static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
555 {
556         struct pp_hwmgr *hwmgr = handle;
557         int ret = 0;
558
559         if (!hwmgr || !hwmgr->pm_en)
560                 return -EINVAL;
561
562         if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL) {
563                 pr_info_ratelimited("%s was not implemented.\n", __func__);
564                 return 0;
565         }
566
567         mutex_lock(&hwmgr->smu_lock);
568         ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
569         mutex_unlock(&hwmgr->smu_lock);
570         return ret;
571 }
572
573 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
574 {
575         struct pp_hwmgr *hwmgr = handle;
576         int ret = 0;
577
578         if (!hwmgr || !hwmgr->pm_en)
579                 return -EINVAL;
580
581         if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
582                 return -EINVAL;
583
584         mutex_lock(&hwmgr->smu_lock);
585         ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
586         mutex_unlock(&hwmgr->smu_lock);
587         return ret;
588 }
589
590 static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
591 {
592         struct pp_hwmgr *hwmgr = handle;
593         int ret = 0;
594
595         if (!hwmgr || !hwmgr->pm_en)
596                 return -EINVAL;
597
598         if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
599                 pr_info_ratelimited("%s was not implemented.\n", __func__);
600                 return 0;
601         }
602         mutex_lock(&hwmgr->smu_lock);
603         ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
604         mutex_unlock(&hwmgr->smu_lock);
605         return ret;
606 }
607
/*
 * Fill @data with the number of power states and their UI classification,
 * translated to amd_pm_state_type (same mapping as
 * pp_dpm_get_current_power_state).  @data is zeroed first so it is sane
 * even on the -EINVAL path.  The state array is walked by ps_size strides
 * because pp_power_state is variable-sized.
 */
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
646
647 static int pp_dpm_get_pp_table(void *handle, char **table)
648 {
649         struct pp_hwmgr *hwmgr = handle;
650         int size = 0;
651
652         if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
653                 return -EINVAL;
654
655         mutex_lock(&hwmgr->smu_lock);
656         *table = (char *)hwmgr->soft_pp_table;
657         size = hwmgr->soft_pp_table_size;
658         mutex_unlock(&hwmgr->smu_lock);
659         return size;
660 }
661
/*
 * Full powerplay reset: hw_fini, hw_init, then re-run COMPLETE_INIT.
 *
 * NOTE(review): the only visible caller (pp_dpm_set_pp_table) invokes this
 * while holding smu_lock, so no locking is done here — confirm any new
 * caller holds the lock too.
 */
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}
677
/*
 * Install a user-supplied pp table.
 *
 * The original soft table is duplicated once into hardcode_pp_table (kept
 * for the hwmgr's lifetime), the user bytes are copied over it, the soft
 * pointer is switched to it, and powerplay is fully reset; AVFS is then
 * disabled if the backend supports it.  All under smu_lock, with goto-based
 * unlock on every error path.
 *
 * NOTE(review): @size is not validated against soft_pp_table_size before
 * the memcpy; presumably the sysfs caller bounds it — confirm.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			goto err;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		goto err;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			goto err;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
err:
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}
714
715 static int pp_dpm_force_clock_level(void *handle,
716                 enum pp_clock_type type, uint32_t mask)
717 {
718         struct pp_hwmgr *hwmgr = handle;
719         int ret = 0;
720
721         if (!hwmgr || !hwmgr->pm_en)
722                 return -EINVAL;
723
724         if (hwmgr->hwmgr_func->force_clock_level == NULL) {
725                 pr_info_ratelimited("%s was not implemented.\n", __func__);
726                 return 0;
727         }
728
729         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
730                 pr_debug("force clock level is for dpm manual mode only.\n");
731                 return -EINVAL;
732         }
733
734         mutex_lock(&hwmgr->smu_lock);
735         ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
736         mutex_unlock(&hwmgr->smu_lock);
737         return ret;
738 }
739
740 static int pp_dpm_print_clock_levels(void *handle,
741                 enum pp_clock_type type, char *buf)
742 {
743         struct pp_hwmgr *hwmgr = handle;
744         int ret = 0;
745
746         if (!hwmgr || !hwmgr->pm_en)
747                 return -EINVAL;
748
749         if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
750                 pr_info_ratelimited("%s was not implemented.\n", __func__);
751                 return 0;
752         }
753         mutex_lock(&hwmgr->smu_lock);
754         ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
755         mutex_unlock(&hwmgr->smu_lock);
756         return ret;
757 }
758
759 static int pp_dpm_get_sclk_od(void *handle)
760 {
761         struct pp_hwmgr *hwmgr = handle;
762         int ret = 0;
763
764         if (!hwmgr || !hwmgr->pm_en)
765                 return -EINVAL;
766
767         if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
768                 pr_info_ratelimited("%s was not implemented.\n", __func__);
769                 return 0;
770         }
771         mutex_lock(&hwmgr->smu_lock);
772         ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
773         mutex_unlock(&hwmgr->smu_lock);
774         return ret;
775 }
776
777 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
778 {
779         struct pp_hwmgr *hwmgr = handle;
780         int ret = 0;
781
782         if (!hwmgr || !hwmgr->pm_en)
783                 return -EINVAL;
784
785         if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
786                 pr_info_ratelimited("%s was not implemented.\n", __func__);
787                 return 0;
788         }
789
790         mutex_lock(&hwmgr->smu_lock);
791         ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
792         mutex_unlock(&hwmgr->smu_lock);
793         return ret;
794 }
795
796 static int pp_dpm_get_mclk_od(void *handle)
797 {
798         struct pp_hwmgr *hwmgr = handle;
799         int ret = 0;
800
801         if (!hwmgr || !hwmgr->pm_en)
802                 return -EINVAL;
803
804         if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
805                 pr_info_ratelimited("%s was not implemented.\n", __func__);
806                 return 0;
807         }
808         mutex_lock(&hwmgr->smu_lock);
809         ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
810         mutex_unlock(&hwmgr->smu_lock);
811         return ret;
812 }
813
814 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
815 {
816         struct pp_hwmgr *hwmgr = handle;
817         int ret = 0;
818
819         if (!hwmgr || !hwmgr->pm_en)
820                 return -EINVAL;
821
822         if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
823                 pr_info_ratelimited("%s was not implemented.\n", __func__);
824                 return 0;
825         }
826         mutex_lock(&hwmgr->smu_lock);
827         ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
828         mutex_unlock(&hwmgr->smu_lock);
829         return ret;
830 }
831
/*
 * Read a power/thermal sensor value into *value (*size bytes available).
 *
 * Pstate clocks and fan RPM limits are served lock-free from cached hwmgr
 * fields; everything else is forwarded to the backend under smu_lock.
 *
 * NOTE(review): unlike the other wrappers, hwmgr_func->read_sensor is not
 * NULL-checked before the call — presumably every backend implements it;
 * confirm.
 */
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		mutex_lock(&hwmgr->smu_lock);
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
		mutex_unlock(&hwmgr->smu_lock);
		return ret;
	}
}
861
862 static struct amd_vce_state*
863 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
864 {
865         struct pp_hwmgr *hwmgr = handle;
866
867         if (!hwmgr || !hwmgr->pm_en)
868                 return NULL;
869
870         if (idx < hwmgr->num_vce_state_tables)
871                 return &hwmgr->vce_states[idx];
872         return NULL;
873 }
874
/*
 * Format the available/current power profile modes into @buf.
 *
 * The backend call is made under smu_lock, matching every other accessor
 * in this file (this lock was previously missing here).  When the backend
 * lacks the hook, an empty line is returned so the sysfs read still
 * succeeds.
 */
static int pp_get_power_profile_mode(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	if (!hwmgr || !hwmgr->pm_en || !buf)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return snprintf(buf, PAGE_SIZE, "\n");
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}
893
/*
 * Apply a power profile mode with @size parameters from @input.
 *
 * Rejected (-EINVAL) unless powerplay is enabled, the backend implements
 * the hook, and the dpm level is MANUAL; the backend call itself runs
 * under smu_lock.
 */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -EINVAL;

	if (!hwmgr || !hwmgr->pm_en)
		return ret;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return ret;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("power profile setting is for manual dpm mode only.\n");
		return ret;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}
917
918 static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
919 {
920         struct pp_hwmgr *hwmgr = handle;
921
922         if (!hwmgr || !hwmgr->pm_en)
923                 return -EINVAL;
924
925         if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
926                 return 0;
927
928         return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
929 }
930
931 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
932 {
933         struct pp_hwmgr *hwmgr = handle;
934
935         if (!hwmgr || !hwmgr->pm_en)
936                 return -EINVAL;
937
938         if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
939                 pr_info_ratelimited("%s was not implemented.\n", __func__);
940                 return 0;
941         }
942
943         return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
944 }
945
946 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
947 {
948         struct pp_hwmgr *hwmgr = handle;
949
950         if (!hwmgr)
951                 return -EINVAL;
952
953         if (!hwmgr->pm_en)
954                 return 0;
955
956         if (hwmgr->hwmgr_func->set_mp1_state)
957                 return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
958
959         return 0;
960 }
961
/*
 * Enable or disable one predefined power profile and re-resolve the
 * active workload from the remaining set bits in workload_mask.
 * Custom profiles are rejected (type must be < PP_SMC_POWER_PROFILE_CUSTOM).
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		/* Clear this profile's priority bit, then pick the
		 * highest-priority profile still set (fls returns the
		 * 1-based index of the highest set bit; 0 if none).
		 * Note: "workload_prority" is the field's spelling (sic). */
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		/* Set this profile's priority bit and re-resolve. */
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	/* Compute profile may additionally toggle power features that
	 * hurt compute performance; bail out (unlocked) on failure. */
	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
		hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
			if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
				mutex_unlock(&hwmgr->smu_lock);
				return -EINVAL;
			}
	}

	/* In manual mode the user controls the profile directly, so the
	 * resolved workload is only pushed outside manual mode. */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1008
1009 static int pp_set_power_limit(void *handle, uint32_t limit)
1010 {
1011         struct pp_hwmgr *hwmgr = handle;
1012         uint32_t max_power_limit;
1013
1014         if (!hwmgr || !hwmgr->pm_en)
1015                 return -EINVAL;
1016
1017         if (hwmgr->hwmgr_func->set_power_limit == NULL) {
1018                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1019                 return -EINVAL;
1020         }
1021
1022         if (limit == 0)
1023                 limit = hwmgr->default_power_limit;
1024
1025         max_power_limit = hwmgr->default_power_limit;
1026         if (hwmgr->od_enabled) {
1027                 max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1028                 max_power_limit /= 100;
1029         }
1030
1031         if (limit > max_power_limit)
1032                 return -EINVAL;
1033
1034         mutex_lock(&hwmgr->smu_lock);
1035         hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1036         hwmgr->power_limit = limit;
1037         mutex_unlock(&hwmgr->smu_lock);
1038         return 0;
1039 }
1040
1041 static int pp_get_power_limit(void *handle, uint32_t *limit,
1042                               enum pp_power_limit_level pp_limit_level,
1043                               enum pp_power_type power_type)
1044 {
1045         struct pp_hwmgr *hwmgr = handle;
1046         int ret = 0;
1047
1048         if (!hwmgr || !hwmgr->pm_en ||!limit)
1049                 return -EINVAL;
1050
1051         if (power_type != PP_PWR_TYPE_SUSTAINED)
1052                 return -EOPNOTSUPP;
1053
1054         mutex_lock(&hwmgr->smu_lock);
1055
1056         switch (pp_limit_level) {
1057                 case PP_PWR_LIMIT_CURRENT:
1058                         *limit = hwmgr->power_limit;
1059                         break;
1060                 case PP_PWR_LIMIT_DEFAULT:
1061                         *limit = hwmgr->default_power_limit;
1062                         break;
1063                 case PP_PWR_LIMIT_MAX:
1064                         *limit = hwmgr->default_power_limit;
1065                         if (hwmgr->od_enabled) {
1066                                 *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1067                                 *limit /= 100;
1068                         }
1069                         break;
1070                 default:
1071                         ret = -EOPNOTSUPP;
1072                         break;
1073         }
1074
1075         mutex_unlock(&hwmgr->smu_lock);
1076
1077         return ret;
1078 }
1079
1080 static int pp_display_configuration_change(void *handle,
1081         const struct amd_pp_display_configuration *display_config)
1082 {
1083         struct pp_hwmgr *hwmgr = handle;
1084
1085         if (!hwmgr || !hwmgr->pm_en)
1086                 return -EINVAL;
1087
1088         mutex_lock(&hwmgr->smu_lock);
1089         phm_store_dal_configuration_data(hwmgr, display_config);
1090         mutex_unlock(&hwmgr->smu_lock);
1091         return 0;
1092 }
1093
1094 static int pp_get_display_power_level(void *handle,
1095                 struct amd_pp_simple_clock_info *output)
1096 {
1097         struct pp_hwmgr *hwmgr = handle;
1098         int ret = 0;
1099
1100         if (!hwmgr || !hwmgr->pm_en ||!output)
1101                 return -EINVAL;
1102
1103         mutex_lock(&hwmgr->smu_lock);
1104         ret = phm_get_dal_power_level(hwmgr, output);
1105         mutex_unlock(&hwmgr->smu_lock);
1106         return ret;
1107 }
1108
/*
 * Fill *clocks with the current hardware clock info of the active
 * power state, picking the power-containment or activity designation
 * depending on platform caps.
 */
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	/* Return value intentionally ignored; simple_clocks stays
	 * zeroed on failure and level==0 is handled below. */
	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info \n");
		mutex_unlock(&hwmgr->smu_lock);
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	/* Default shallow-sleep clocks to the regular engine clocks;
	 * refined below if the query succeeds. */
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	/* level 0 means the DAL query failed or reported nothing;
	 * fall back to the highest power level. */
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
1160
1161 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1162 {
1163         struct pp_hwmgr *hwmgr = handle;
1164         int ret = 0;
1165
1166         if (!hwmgr || !hwmgr->pm_en)
1167                 return -EINVAL;
1168
1169         if (clocks == NULL)
1170                 return -EINVAL;
1171
1172         mutex_lock(&hwmgr->smu_lock);
1173         ret = phm_get_clock_by_type(hwmgr, type, clocks);
1174         mutex_unlock(&hwmgr->smu_lock);
1175         return ret;
1176 }
1177
1178 static int pp_get_clock_by_type_with_latency(void *handle,
1179                 enum amd_pp_clock_type type,
1180                 struct pp_clock_levels_with_latency *clocks)
1181 {
1182         struct pp_hwmgr *hwmgr = handle;
1183         int ret = 0;
1184
1185         if (!hwmgr || !hwmgr->pm_en ||!clocks)
1186                 return -EINVAL;
1187
1188         mutex_lock(&hwmgr->smu_lock);
1189         ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1190         mutex_unlock(&hwmgr->smu_lock);
1191         return ret;
1192 }
1193
1194 static int pp_get_clock_by_type_with_voltage(void *handle,
1195                 enum amd_pp_clock_type type,
1196                 struct pp_clock_levels_with_voltage *clocks)
1197 {
1198         struct pp_hwmgr *hwmgr = handle;
1199         int ret = 0;
1200
1201         if (!hwmgr || !hwmgr->pm_en ||!clocks)
1202                 return -EINVAL;
1203
1204         mutex_lock(&hwmgr->smu_lock);
1205
1206         ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1207
1208         mutex_unlock(&hwmgr->smu_lock);
1209         return ret;
1210 }
1211
1212 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1213                 void *clock_ranges)
1214 {
1215         struct pp_hwmgr *hwmgr = handle;
1216         int ret = 0;
1217
1218         if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1219                 return -EINVAL;
1220
1221         mutex_lock(&hwmgr->smu_lock);
1222         ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1223                         clock_ranges);
1224         mutex_unlock(&hwmgr->smu_lock);
1225
1226         return ret;
1227 }
1228
1229 static int pp_display_clock_voltage_request(void *handle,
1230                 struct pp_display_clock_request *clock)
1231 {
1232         struct pp_hwmgr *hwmgr = handle;
1233         int ret = 0;
1234
1235         if (!hwmgr || !hwmgr->pm_en ||!clock)
1236                 return -EINVAL;
1237
1238         mutex_lock(&hwmgr->smu_lock);
1239         ret = phm_display_clock_voltage_request(hwmgr, clock);
1240         mutex_unlock(&hwmgr->smu_lock);
1241
1242         return ret;
1243 }
1244
1245 static int pp_get_display_mode_validation_clocks(void *handle,
1246                 struct amd_pp_simple_clock_info *clocks)
1247 {
1248         struct pp_hwmgr *hwmgr = handle;
1249         int ret = 0;
1250
1251         if (!hwmgr || !hwmgr->pm_en ||!clocks)
1252                 return -EINVAL;
1253
1254         clocks->level = PP_DAL_POWERLEVEL_7;
1255
1256         mutex_lock(&hwmgr->smu_lock);
1257
1258         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1259                 ret = phm_get_max_high_clocks(hwmgr, clocks);
1260
1261         mutex_unlock(&hwmgr->smu_lock);
1262         return ret;
1263 }
1264
1265 static int pp_dpm_powergate_mmhub(void *handle)
1266 {
1267         struct pp_hwmgr *hwmgr = handle;
1268
1269         if (!hwmgr || !hwmgr->pm_en)
1270                 return -EINVAL;
1271
1272         if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1273                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1274                 return 0;
1275         }
1276
1277         return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1278 }
1279
1280 static int pp_dpm_powergate_gfx(void *handle, bool gate)
1281 {
1282         struct pp_hwmgr *hwmgr = handle;
1283
1284         if (!hwmgr || !hwmgr->pm_en)
1285                 return 0;
1286
1287         if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1288                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1289                 return 0;
1290         }
1291
1292         return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1293 }
1294
1295 static void pp_dpm_powergate_acp(void *handle, bool gate)
1296 {
1297         struct pp_hwmgr *hwmgr = handle;
1298
1299         if (!hwmgr || !hwmgr->pm_en)
1300                 return;
1301
1302         if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1303                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1304                 return;
1305         }
1306
1307         hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1308 }
1309
1310 static void pp_dpm_powergate_sdma(void *handle, bool gate)
1311 {
1312         struct pp_hwmgr *hwmgr = handle;
1313
1314         if (!hwmgr)
1315                 return;
1316
1317         if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1318                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1319                 return;
1320         }
1321
1322         hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1323 }
1324
1325 static int pp_set_powergating_by_smu(void *handle,
1326                                 uint32_t block_type, bool gate)
1327 {
1328         int ret = 0;
1329
1330         switch (block_type) {
1331         case AMD_IP_BLOCK_TYPE_UVD:
1332         case AMD_IP_BLOCK_TYPE_VCN:
1333                 pp_dpm_powergate_uvd(handle, gate);
1334                 break;
1335         case AMD_IP_BLOCK_TYPE_VCE:
1336                 pp_dpm_powergate_vce(handle, gate);
1337                 break;
1338         case AMD_IP_BLOCK_TYPE_GMC:
1339                 pp_dpm_powergate_mmhub(handle);
1340                 break;
1341         case AMD_IP_BLOCK_TYPE_GFX:
1342                 ret = pp_dpm_powergate_gfx(handle, gate);
1343                 break;
1344         case AMD_IP_BLOCK_TYPE_ACP:
1345                 pp_dpm_powergate_acp(handle, gate);
1346                 break;
1347         case AMD_IP_BLOCK_TYPE_SDMA:
1348                 pp_dpm_powergate_sdma(handle, gate);
1349                 break;
1350         default:
1351                 break;
1352         }
1353         return ret;
1354 }
1355
1356 static int pp_notify_smu_enable_pwe(void *handle)
1357 {
1358         struct pp_hwmgr *hwmgr = handle;
1359
1360         if (!hwmgr || !hwmgr->pm_en)
1361                 return -EINVAL;
1362
1363         if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1364                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1365                 return -EINVAL;
1366         }
1367
1368         mutex_lock(&hwmgr->smu_lock);
1369         hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1370         mutex_unlock(&hwmgr->smu_lock);
1371
1372         return 0;
1373 }
1374
1375 static int pp_enable_mgpu_fan_boost(void *handle)
1376 {
1377         struct pp_hwmgr *hwmgr = handle;
1378
1379         if (!hwmgr)
1380                 return -EINVAL;
1381
1382         if (!hwmgr->pm_en ||
1383              hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1384                 return 0;
1385
1386         mutex_lock(&hwmgr->smu_lock);
1387         hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1388         mutex_unlock(&hwmgr->smu_lock);
1389
1390         return 0;
1391 }
1392
1393 static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1394 {
1395         struct pp_hwmgr *hwmgr = handle;
1396
1397         if (!hwmgr || !hwmgr->pm_en)
1398                 return -EINVAL;
1399
1400         if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1401                 pr_debug("%s was not implemented.\n", __func__);
1402                 return -EINVAL;
1403         }
1404
1405         mutex_lock(&hwmgr->smu_lock);
1406         hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1407         mutex_unlock(&hwmgr->smu_lock);
1408
1409         return 0;
1410 }
1411
1412 static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1413 {
1414         struct pp_hwmgr *hwmgr = handle;
1415
1416         if (!hwmgr || !hwmgr->pm_en)
1417                 return -EINVAL;
1418
1419         if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1420                 pr_debug("%s was not implemented.\n", __func__);
1421                 return -EINVAL;
1422         }
1423
1424         mutex_lock(&hwmgr->smu_lock);
1425         hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1426         mutex_unlock(&hwmgr->smu_lock);
1427
1428         return 0;
1429 }
1430
1431 static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1432 {
1433         struct pp_hwmgr *hwmgr = handle;
1434
1435         if (!hwmgr || !hwmgr->pm_en)
1436                 return -EINVAL;
1437
1438         if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1439                 pr_debug("%s was not implemented.\n", __func__);
1440                 return -EINVAL;
1441         }
1442
1443         mutex_lock(&hwmgr->smu_lock);
1444         hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1445         mutex_unlock(&hwmgr->smu_lock);
1446
1447         return 0;
1448 }
1449
1450 static int pp_set_active_display_count(void *handle, uint32_t count)
1451 {
1452         struct pp_hwmgr *hwmgr = handle;
1453         int ret = 0;
1454
1455         if (!hwmgr || !hwmgr->pm_en)
1456                 return -EINVAL;
1457
1458         mutex_lock(&hwmgr->smu_lock);
1459         ret = phm_set_active_display_count(hwmgr, count);
1460         mutex_unlock(&hwmgr->smu_lock);
1461
1462         return ret;
1463 }
1464
1465 static int pp_get_asic_baco_capability(void *handle, bool *cap)
1466 {
1467         struct pp_hwmgr *hwmgr = handle;
1468
1469         *cap = false;
1470         if (!hwmgr)
1471                 return -EINVAL;
1472
1473         if (!(hwmgr->not_vf && amdgpu_dpm) ||
1474                 !hwmgr->hwmgr_func->get_asic_baco_capability)
1475                 return 0;
1476
1477         mutex_lock(&hwmgr->smu_lock);
1478         hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
1479         mutex_unlock(&hwmgr->smu_lock);
1480
1481         return 0;
1482 }
1483
1484 static int pp_get_asic_baco_state(void *handle, int *state)
1485 {
1486         struct pp_hwmgr *hwmgr = handle;
1487
1488         if (!hwmgr)
1489                 return -EINVAL;
1490
1491         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1492                 return 0;
1493
1494         mutex_lock(&hwmgr->smu_lock);
1495         hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1496         mutex_unlock(&hwmgr->smu_lock);
1497
1498         return 0;
1499 }
1500
1501 static int pp_set_asic_baco_state(void *handle, int state)
1502 {
1503         struct pp_hwmgr *hwmgr = handle;
1504
1505         if (!hwmgr)
1506                 return -EINVAL;
1507
1508         if (!(hwmgr->not_vf && amdgpu_dpm) ||
1509                 !hwmgr->hwmgr_func->set_asic_baco_state)
1510                 return 0;
1511
1512         mutex_lock(&hwmgr->smu_lock);
1513         hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1514         mutex_unlock(&hwmgr->smu_lock);
1515
1516         return 0;
1517 }
1518
1519 static int pp_get_ppfeature_status(void *handle, char *buf)
1520 {
1521         struct pp_hwmgr *hwmgr = handle;
1522         int ret = 0;
1523
1524         if (!hwmgr || !hwmgr->pm_en || !buf)
1525                 return -EINVAL;
1526
1527         if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1528                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1529                 return -EINVAL;
1530         }
1531
1532         mutex_lock(&hwmgr->smu_lock);
1533         ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1534         mutex_unlock(&hwmgr->smu_lock);
1535
1536         return ret;
1537 }
1538
1539 static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1540 {
1541         struct pp_hwmgr *hwmgr = handle;
1542         int ret = 0;
1543
1544         if (!hwmgr || !hwmgr->pm_en)
1545                 return -EINVAL;
1546
1547         if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1548                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1549                 return -EINVAL;
1550         }
1551
1552         mutex_lock(&hwmgr->smu_lock);
1553         ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1554         mutex_unlock(&hwmgr->smu_lock);
1555
1556         return ret;
1557 }
1558
1559 static int pp_asic_reset_mode_2(void *handle)
1560 {
1561         struct pp_hwmgr *hwmgr = handle;
1562                 int ret = 0;
1563
1564         if (!hwmgr || !hwmgr->pm_en)
1565                 return -EINVAL;
1566
1567         if (hwmgr->hwmgr_func->asic_reset == NULL) {
1568                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1569                 return -EINVAL;
1570         }
1571
1572         mutex_lock(&hwmgr->smu_lock);
1573         ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1574         mutex_unlock(&hwmgr->smu_lock);
1575
1576         return ret;
1577 }
1578
1579 static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1580 {
1581         struct pp_hwmgr *hwmgr = handle;
1582         int ret = 0;
1583
1584         if (!hwmgr || !hwmgr->pm_en)
1585                 return -EINVAL;
1586
1587         if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1588                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1589                 return -EINVAL;
1590         }
1591
1592         mutex_lock(&hwmgr->smu_lock);
1593         ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1594         mutex_unlock(&hwmgr->smu_lock);
1595
1596         return ret;
1597 }
1598
1599 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1600 {
1601         struct pp_hwmgr *hwmgr = handle;
1602
1603         if (!hwmgr)
1604                 return -EINVAL;
1605
1606         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1607                 return 0;
1608
1609         mutex_lock(&hwmgr->smu_lock);
1610         hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1611         mutex_unlock(&hwmgr->smu_lock);
1612
1613         return 0;
1614 }
1615
1616 static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1617 {
1618         struct pp_hwmgr *hwmgr = handle;
1619
1620         if (!hwmgr)
1621                 return -EINVAL;
1622
1623         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1624                 return 0;
1625
1626         mutex_lock(&hwmgr->smu_lock);
1627         hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1628         mutex_unlock(&hwmgr->smu_lock);
1629
1630         return 0;
1631 }
1632
1633 static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1634 {
1635         struct pp_hwmgr *hwmgr = handle;
1636         ssize_t size;
1637
1638         if (!hwmgr)
1639                 return -EINVAL;
1640
1641         if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1642                 return -EOPNOTSUPP;
1643
1644         mutex_lock(&hwmgr->smu_lock);
1645         size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
1646         mutex_unlock(&hwmgr->smu_lock);
1647
1648         return size;
1649 }
1650
1651 static int pp_gfx_state_change_set(void *handle, uint32_t state)
1652 {
1653         struct pp_hwmgr *hwmgr = handle;
1654
1655         if (!hwmgr || !hwmgr->pm_en)
1656                 return -EINVAL;
1657
1658         if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
1659                 pr_info_ratelimited("%s was not implemented.\n", __func__);
1660                 return -EINVAL;
1661         }
1662
1663         mutex_lock(&hwmgr->smu_lock);
1664         hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
1665         mutex_unlock(&hwmgr->smu_lock);
1666         return 0;
1667 }
1668
1669 static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
1670 {
1671         struct pp_hwmgr *hwmgr = handle;
1672         struct amdgpu_device *adev = hwmgr->adev;
1673
1674         if (!addr || !size)
1675                 return -EINVAL;
1676
1677         *addr = NULL;
1678         *size = 0;
1679         mutex_lock(&hwmgr->smu_lock);
1680         if (adev->pm.smu_prv_buffer) {
1681                 amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
1682                 *size = adev->pm.smu_prv_buffer_size;
1683         }
1684         mutex_unlock(&hwmgr->smu_lock);
1685
1686         return 0;
1687 }
1688
/*
 * Dispatch table exported to amdgpu's amd_pm layer: every entry is one
 * of the pp_* wrappers above, which validate the handle and forward to
 * the ASIC-specific hwmgr backend (usually under smu_lock).
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
};