drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24 #include <linux/pci.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "smu_internal.h"
29 #include "smu_v11_0.h"
30 #include "smu_v12_0.h"
31 #include "atom.h"
32 #include "vega20_ppt.h"
33 #include "arcturus_ppt.h"
34 #include "navi10_ppt.h"
35 #include "renoir_ppt.h"
36
37 #undef __SMU_DUMMY_MAP
38 #define __SMU_DUMMY_MAP(type)   #type
39 static const char *__smu_message_names[] = {
40         SMU_MESSAGE_TYPES
41 };
42
43 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
44 {
45         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
46                 return "unknown smu message";
47         return __smu_message_names[type];
48 }
49
50 #undef __SMU_DUMMY_MAP
51 #define __SMU_DUMMY_MAP(fea)    #fea
52 static const char *__smu_feature_names[] = {
53         SMU_FEATURE_MASKS
54 };
55
56 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
57 {
58         if (feature < 0 || feature >= SMU_FEATURE_COUNT)
59                 return "unknown smu feature";
60         return __smu_feature_names[feature];
61 }
62
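/**
 * smu_sys_get_pp_feature_mask - dump the enabled SMU feature mask to a buffer
 *
 * @smu: smu_context pointer
 * @buf: destination buffer
 *
 * Prints the 64-bit enabled feature mask followed by one line per supported
 * feature with its name, hardware index and enabled/disabled state.
 *
 * Returns the number of bytes written, or -EINVAL when DPM is not enabled.
 */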
63 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
64 {
65         struct amdgpu_device *adev = smu->adev;
66         size_t size = 0;
67         int ret = 0, i = 0;
68         uint32_t feature_mask[2] = { 0 };
69         int32_t feature_index = 0;
70         uint32_t count = 0;
71         uint32_t sort_feature[SMU_FEATURE_COUNT];
72         uint64_t hw_feature_count = 0;
73
74         if (!adev->pm.dpm_enabled)
75                 return -EINVAL;
76
77         mutex_lock(&smu->mutex);
78
79         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
80         if (ret)
81                 goto failed;
82
83         size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
84                         feature_mask[1], feature_mask[0]);
85
86         for (i = 0; i < SMU_FEATURE_COUNT; i++) {
87                 feature_index = smu_feature_get_index(smu, i);
88                 if (feature_index < 0)
89                         continue;
90                 sort_feature[feature_index] = i;
91                 hw_feature_count++;
92         }
93
94         for (i = 0; i < hw_feature_count; i++) {
95                 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
96                                count++,
97                                smu_get_feature_name(smu, sort_feature[i]),
98                                i,
99                                !!smu_feature_is_enabled(smu, sort_feature[i]) ?
100                                "enabled" : "disabled");
101         }
102
103 failed:
104         mutex_unlock(&smu->mutex);
105
106         return size;
107 }
108
109 static int smu_feature_update_enable_state(struct smu_context *smu,
110                                            uint64_t feature_mask,
111                                            bool enabled)
112 {
113         struct smu_feature *feature = &smu->smu_feature;
114         uint32_t feature_low = 0, feature_high = 0;
115         int ret = 0;
116
117         feature_low = (feature_mask >> 0) & 0xffffffff;
118         feature_high = (feature_mask >> 32) & 0xffffffff;
119
120         if (enabled) {
121                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
122                                                   feature_low, NULL);
123                 if (ret)
124                         return ret;
125                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
126                                                   feature_high, NULL);
127                 if (ret)
128                         return ret;
129         } else {
130                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
131                                                   feature_low, NULL);
132                 if (ret)
133                         return ret;
134                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
135                                                   feature_high, NULL);
136                 if (ret)
137                         return ret;
138         }
139
140         mutex_lock(&feature->mutex);
141         if (enabled)
142                 bitmap_or(feature->enabled, feature->enabled,
143                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
144         else
145                 bitmap_andnot(feature->enabled, feature->enabled,
146                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
147         mutex_unlock(&feature->mutex);
148
149         return ret;
150 }
151
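/**
 * smu_sys_set_pp_feature_mask - apply a user supplied SMU feature mask
 *
 * @smu:      smu_context pointer
 * @new_mask: 64-bit bitmap of the features that should be enabled
 *
 * Compares @new_mask against the currently enabled features and issues the
 * corresponding enable/disable requests only for the bits that differ.
 *
 * Returns 0 on success, negative error code on failure.
 */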
152 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
153 {
154         int ret = 0;
155         uint32_t feature_mask[2] = { 0 };
156         uint64_t feature_2_enabled = 0;
157         uint64_t feature_2_disabled = 0;
158         uint64_t feature_enables = 0;
159         struct amdgpu_device *adev = smu->adev;
160
161         if (!adev->pm.dpm_enabled)
162                 return -EINVAL;
163
164         mutex_lock(&smu->mutex);
165
166         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
167         if (ret)
168                 goto out;
169
170         feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
171
172         feature_2_enabled  = ~feature_enables & new_mask;
173         feature_2_disabled = feature_enables & ~new_mask;
174
175         if (feature_2_enabled) {
176                 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
177                 if (ret)
178                         goto out;
179         }
180         if (feature_2_disabled) {
181                 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
182                 if (ret)
183                         goto out;
184         }
185
186 out:
187         mutex_unlock(&smu->mutex);
188
189         return ret;
190 }
191
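/**
 * smu_get_smc_version - query the driver interface and SMU firmware versions
 *
 * @smu:         smu_context pointer
 * @if_version:  optional output for the driver interface version
 * @smu_version: optional output for the SMU firmware version
 *
 * At least one of the output pointers must be non-NULL. The values are cached
 * in @smu after the first successful query so that later calls do not have to
 * go to the firmware again.
 *
 * Illustrative use (mirrors smu_disable_dpm() below):
 *
 *	uint32_t smu_version;
 *
 *	ret = smu_get_smc_version(smu, NULL, &smu_version);
 *
 * Returns 0 on success, negative error code on failure.
 */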
192 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
193 {
194         int ret = 0;
195
196         if (!if_version && !smu_version)
197                 return -EINVAL;
198
199         if (smu->smc_fw_if_version && smu->smc_fw_version)
200         {
201                 if (if_version)
202                         *if_version = smu->smc_fw_if_version;
203
204                 if (smu_version)
205                         *smu_version = smu->smc_fw_version;
206
207                 return 0;
208         }
209
210         if (if_version) {
211                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
212                 if (ret)
213                         return ret;
214
215                 smu->smc_fw_if_version = *if_version;
216         }
217
218         if (smu_version) {
219                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
220                 if (ret)
221                         return ret;
222
223                 smu->smc_fw_version = *smu_version;
224         }
225
226         return ret;
227 }
228
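/**
 * smu_set_soft_freq_range - set the soft min/max frequency for a clock domain
 *
 * @smu:         smu_context pointer
 * @clk_type:    clock domain to limit
 * @min:         soft minimum frequency
 * @max:         soft maximum frequency
 * @lock_needed: take smu->mutex around the request when true
 *
 * The request is silently skipped (returns 0) when DPM for the given clock
 * domain is not enabled.
 */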
229 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
230                             uint32_t min, uint32_t max, bool lock_needed)
231 {
232         int ret = 0;
233
234         if (!smu_clk_dpm_is_enabled(smu, clk_type))
235                 return 0;
236
237         if (lock_needed)
238                 mutex_lock(&smu->mutex);
239         ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
240         if (lock_needed)
241                 mutex_unlock(&smu->mutex);
242
243         return ret;
244 }
245
246 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
247                             uint32_t min, uint32_t max)
248 {
249         int ret = 0, clk_id = 0;
250         uint32_t param;
251
252         if (min <= 0 && max <= 0)
253                 return -EINVAL;
254
255         if (!smu_clk_dpm_is_enabled(smu, clk_type))
256                 return 0;
257
258         clk_id = smu_clk_get_index(smu, clk_type);
259         if (clk_id < 0)
260                 return clk_id;
261
262         if (max > 0) {
263                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
264                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
265                                                   param, NULL);
266                 if (ret)
267                         return ret;
268         }
269
270         if (min > 0) {
271                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
272                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
273                                                   param, NULL);
274                 if (ret)
275                         return ret;
276         }
277
278
279         return ret;
280 }
281
282 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
283                            uint32_t *min, uint32_t *max, bool lock_needed)
284 {
285         uint32_t clock_limit;
286         int ret = 0;
287
288         if (!min && !max)
289                 return -EINVAL;
290
291         if (lock_needed)
292                 mutex_lock(&smu->mutex);
293
294         if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
295                 switch (clk_type) {
296                 case SMU_MCLK:
297                 case SMU_UCLK:
298                         clock_limit = smu->smu_table.boot_values.uclk;
299                         break;
300                 case SMU_GFXCLK:
301                 case SMU_SCLK:
302                         clock_limit = smu->smu_table.boot_values.gfxclk;
303                         break;
304                 case SMU_SOCCLK:
305                         clock_limit = smu->smu_table.boot_values.socclk;
306                         break;
307                 default:
308                         clock_limit = 0;
309                         break;
310                 }
311
312                 /* clock in MHz */
313                 if (min)
314                         *min = clock_limit / 100;
315                 if (max)
316                         *max = clock_limit / 100;
317         } else {
318                 /*
319                  * Todo: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to the
320                  * core driver, with helpers for the parts that are common (SMU_v11_x | SMU_v12_x funcs).
321                  */
322                 ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
323         }
324
325         if (lock_needed)
326                 mutex_unlock(&smu->mutex);
327
328         return ret;
329 }
330
331 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
332                               uint16_t level, uint32_t *value)
333 {
334         int ret = 0, clk_id = 0;
335         uint32_t param;
336
337         if (!value)
338                 return -EINVAL;
339
340         if (!smu_clk_dpm_is_enabled(smu, clk_type))
341                 return 0;
342
343         clk_id = smu_clk_get_index(smu, clk_type);
344         if (clk_id < 0)
345                 return clk_id;
346
347         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
348
349         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
350                                           param, value);
351         if (ret)
352                 return ret;
353
354         /* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
355          * Discrete DPM is not supported for now, so mask the bit off. */
356         *value = *value & 0x7fffffff;
357
358         return ret;
359 }
360
361 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
362                             uint32_t *value)
363 {
364         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
365 }
366
367 int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
368                             uint32_t *min_value, uint32_t *max_value)
369 {
370         int ret = 0;
371         uint32_t level_count = 0;
372
373         if (!min_value && !max_value)
374                 return -EINVAL;
375
376         if (min_value) {
377                 /* by default, level 0 clock value as min value */
378                 ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
379                 if (ret)
380                         return ret;
381         }
382
383         if (max_value) {
384                 ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
385                 if (ret)
386                         return ret;
387
388                 ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
389                 if (ret)
390                         return ret;
391         }
392
393         return ret;
394 }
395
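/**
 * smu_clk_dpm_is_enabled - check whether DPM is enabled for a clock domain
 *
 * @smu:      smu_context pointer
 * @clk_type: clock domain to check
 *
 * Maps the clock type to its DPM feature bit (UCLK/GFXCLK/SOCCLK) and reports
 * whether that feature is currently enabled. Clock types without a mapping
 * are treated as always enabled.
 */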
396 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
397 {
398         enum smu_feature_mask feature_id = 0;
399
400         switch (clk_type) {
401         case SMU_MCLK:
402         case SMU_UCLK:
403                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
404                 break;
405         case SMU_GFXCLK:
406         case SMU_SCLK:
407                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
408                 break;
409         case SMU_SOCCLK:
410                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
411                 break;
412         default:
413                 return true;
414         }
415
416         if (!smu_feature_is_enabled(smu, feature_id)) {
417                 return false;
418         }
419
420         return true;
421 }
422
423 /**
424  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
425  *
426  * @smu:        smu_context pointer
427  * @block_type: the IP block to power gate/ungate
428  * @gate:       to power gate if true, ungate otherwise
429  *
430  * This API takes no smu->mutex lock protection because:
431  * 1. It is either called by another IP block (gfx/sdma/vcn/uvd/vce),
432  *    in which case the caller guarantees the calls are race free.
433  * 2. Or it is called on a user request to set power_dpm_force_performance_level.
434  *    In that case, the smu->mutex protection is already enforced on
435  *    the parent API smu_force_performance_level in the call path.
436  */
437 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
438                            bool gate)
439 {
440         struct amdgpu_device *adev = smu->adev;
441         int ret = 0;
442
443         if (!adev->pm.dpm_enabled)
444                 return -EINVAL;
445
446         switch (block_type) {
447         case AMD_IP_BLOCK_TYPE_UVD:
448                 ret = smu_dpm_set_uvd_enable(smu, !gate);
449                 break;
450         case AMD_IP_BLOCK_TYPE_VCE:
451                 ret = smu_dpm_set_vce_enable(smu, !gate);
452                 break;
453         case AMD_IP_BLOCK_TYPE_GFX:
454                 ret = smu_gfx_off_control(smu, gate);
455                 break;
456         case AMD_IP_BLOCK_TYPE_SDMA:
457                 ret = smu_powergate_sdma(smu, gate);
458                 break;
459         case AMD_IP_BLOCK_TYPE_JPEG:
460                 ret = smu_dpm_set_jpeg_enable(smu, !gate);
461                 break;
462         default:
463                 break;
464         }
465
466         return ret;
467 }
468
469 int smu_get_power_num_states(struct smu_context *smu,
470                              struct pp_states_info *state_info)
471 {
472         if (!state_info)
473                 return -EINVAL;
474
475         /* power states are not supported */
476         memset(state_info, 0, sizeof(struct pp_states_info));
477         state_info->nums = 1;
478         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
479
480         return 0;
481 }
482
483 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
484                            void *data, uint32_t *size)
485 {
486         struct smu_power_context *smu_power = &smu->smu_power;
487         struct smu_power_gate *power_gate = &smu_power->power_gate;
488         int ret = 0;
489
490         if (!data || !size)
491                 return -EINVAL;
492
493         switch (sensor) {
494         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
495                 *((uint32_t *)data) = smu->pstate_sclk;
496                 *size = 4;
497                 break;
498         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
499                 *((uint32_t *)data) = smu->pstate_mclk;
500                 *size = 4;
501                 break;
502         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
503                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
504                 *size = 8;
505                 break;
506         case AMDGPU_PP_SENSOR_UVD_POWER:
507                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
508                 *size = 4;
509                 break;
510         case AMDGPU_PP_SENSOR_VCE_POWER:
511                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
512                 *size = 4;
513                 break;
514         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
515                 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
516                 *size = 4;
517                 break;
518         default:
519                 ret = -EINVAL;
520                 break;
521         }
522
523         if (ret)
524                 *size = 0;
525
526         return ret;
527 }
528
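/**
 * smu_update_table - transfer a table between driver memory and the SMU
 *
 * @smu:         smu_context pointer
 * @table_index: logical SMU table to transfer
 * @argument:    extra argument packed into the high 16 bits of the message parameter
 * @table_data:  CPU buffer to copy from (drv2smu) or into (smu2drv)
 * @drv2smu:     true to push the table to the SMU, false to pull it back
 *
 * The HDP cache is flushed around the copy so that the CPU and GPU views of
 * the driver table stay consistent.
 *
 * Returns 0 on success, negative error code on failure.
 */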
529 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
530                      void *table_data, bool drv2smu)
531 {
532         struct smu_table_context *smu_table = &smu->smu_table;
533         struct amdgpu_device *adev = smu->adev;
534         struct smu_table *table = &smu_table->driver_table;
535         int table_id = smu_table_get_index(smu, table_index);
536         uint32_t table_size;
537         int ret = 0;
538         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
539                 return -EINVAL;
540
541         table_size = smu_table->tables[table_index].size;
542
543         if (drv2smu) {
544                 memcpy(table->cpu_addr, table_data, table_size);
545                 /*
546                  * Flush the HDP cache to ensure the content seen by the
547                  * GPU is consistent with the CPU.
548                  */
549                 amdgpu_asic_flush_hdp(adev, NULL);
550         }
551
552         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
553                                           SMU_MSG_TransferTableDram2Smu :
554                                           SMU_MSG_TransferTableSmu2Dram,
555                                           table_id | ((argument & 0xFFFF) << 16),
556                                           NULL);
557         if (ret)
558                 return ret;
559
560         if (!drv2smu) {
561                 amdgpu_asic_flush_hdp(adev, NULL);
562                 memcpy(table_data, table->cpu_addr, table_size);
563         }
564
565         return ret;
566 }
567
568 bool is_support_sw_smu(struct amdgpu_device *adev)
569 {
570         if (adev->asic_type == CHIP_VEGA20)
571                 return amdgpu_dpm == 2;
572         else if (adev->asic_type >= CHIP_ARCTURUS) {
573                 if (amdgpu_sriov_is_pp_one_vf(adev) || !amdgpu_sriov_vf(adev))
574                         return true;
575         }
576         return false;
577 }
578
579 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
580 {
581         if (!is_support_sw_smu(adev))
582                 return false;
583
584         if (adev->asic_type == CHIP_VEGA20)
585                 return true;
586
587         return false;
588 }
589
590 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
591 {
592         struct smu_table_context *smu_table = &smu->smu_table;
593         struct amdgpu_device *adev = smu->adev;
594         uint32_t powerplay_table_size;
595
596         if (!adev->pm.dpm_enabled)
597                 return -EINVAL;
598
599         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
600                 return -EINVAL;
601
602         mutex_lock(&smu->mutex);
603
604         if (smu_table->hardcode_pptable)
605                 *table = smu_table->hardcode_pptable;
606         else
607                 *table = smu_table->power_play_table;
608
609         powerplay_table_size = smu_table->power_play_table_size;
610
611         mutex_unlock(&smu->mutex);
612
613         return powerplay_table_size;
614 }
615
616 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
617 {
618         struct smu_table_context *smu_table = &smu->smu_table;
619         struct amdgpu_device *adev = smu->adev;
620         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
621         int ret = 0;
622
623         if (!adev->pm.dpm_enabled)
624                 return -EINVAL;
625
626         if (header->usStructureSize != size) {
627                 pr_err("pp table size does not match!\n");
628                 return -EIO;
629         }
630
631         mutex_lock(&smu->mutex);
632         if (!smu_table->hardcode_pptable)
633                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
634         if (!smu_table->hardcode_pptable) {
635                 ret = -ENOMEM;
636                 goto failed;
637         }
638
639         memcpy(smu_table->hardcode_pptable, buf, size);
640         smu_table->power_play_table = smu_table->hardcode_pptable;
641         smu_table->power_play_table_size = size;
642
643         /*
644          * Special hw_fini action(for Navi1x, the DPMs disablement will be
645          * skipped) may be needed for custom pptable uploading.
646          */
647         smu->uploading_custom_pp_table = true;
648
649         ret = smu_reset(smu);
650         if (ret)
651                 pr_info("smu reset failed, ret = %d\n", ret);
652
653         smu->uploading_custom_pp_table = false;
654
655 failed:
656         mutex_unlock(&smu->mutex);
657         return ret;
658 }
659
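/**
 * smu_feature_init_dpm - initialize the allowed SMU feature bitmap
 *
 * @smu: smu_context pointer
 *
 * Clears the allowed feature bitmap and repopulates it from the ASIC specific
 * allowed feature mask reported by the ppt backend.
 *
 * Returns 0 on success, negative error code on failure.
 */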
660 int smu_feature_init_dpm(struct smu_context *smu)
661 {
662         struct smu_feature *feature = &smu->smu_feature;
663         int ret = 0;
664         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
665
666         mutex_lock(&feature->mutex);
667         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
668         mutex_unlock(&feature->mutex);
669
670         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
671                                              SMU_FEATURE_MAX/32);
672         if (ret)
673                 return ret;
674
675         mutex_lock(&feature->mutex);
676         bitmap_or(feature->allowed, feature->allowed,
677                       (unsigned long *)allowed_feature_mask,
678                       feature->feature_num);
679         mutex_unlock(&feature->mutex);
680
681         return ret;
682 }
683
684
685 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
686 {
687         struct smu_feature *feature = &smu->smu_feature;
688         int feature_id;
689         int ret = 0;
690
691         if (smu->is_apu)
692                 return 1;
693         feature_id = smu_feature_get_index(smu, mask);
694         if (feature_id < 0)
695                 return 0;
696
697         WARN_ON(feature_id > feature->feature_num);
698
699         mutex_lock(&feature->mutex);
700         ret = test_bit(feature_id, feature->enabled);
701         mutex_unlock(&feature->mutex);
702
703         return ret;
704 }
705
706 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
707                             bool enable)
708 {
709         struct smu_feature *feature = &smu->smu_feature;
710         int feature_id;
711
712         feature_id = smu_feature_get_index(smu, mask);
713         if (feature_id < 0)
714                 return -EINVAL;
715
716         WARN_ON(feature_id > feature->feature_num);
717
718         return smu_feature_update_enable_state(smu,
719                                                1ULL << feature_id,
720                                                enable);
721 }
722
723 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
724 {
725         struct smu_feature *feature = &smu->smu_feature;
726         int feature_id;
727         int ret = 0;
728
729         feature_id = smu_feature_get_index(smu, mask);
730         if (feature_id < 0)
731                 return 0;
732
733         WARN_ON(feature_id > feature->feature_num);
734
735         mutex_lock(&feature->mutex);
736         ret = test_bit(feature_id, feature->supported);
737         mutex_unlock(&feature->mutex);
738
739         return ret;
740 }
741
742 int smu_feature_set_supported(struct smu_context *smu,
743                               enum smu_feature_mask mask,
744                               bool enable)
745 {
746         struct smu_feature *feature = &smu->smu_feature;
747         int feature_id;
748         int ret = 0;
749
750         feature_id = smu_feature_get_index(smu, mask);
751         if (feature_id < 0)
752                 return -EINVAL;
753
754         WARN_ON(feature_id > feature->feature_num);
755
756         mutex_lock(&feature->mutex);
757         if (enable)
758                 test_and_set_bit(feature_id, feature->supported);
759         else
760                 test_and_clear_bit(feature_id, feature->supported);
761         mutex_unlock(&feature->mutex);
762
763         return ret;
764 }
765
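/*
 * Hook up the ASIC specific ppt functions and adjust the powerplay feature
 * flags (e.g. GFXOFF and overdrive support) according to the ASIC type.
 */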
766 static int smu_set_funcs(struct amdgpu_device *adev)
767 {
768         struct smu_context *smu = &adev->smu;
769
770         if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
771                 smu->od_enabled = true;
772
773         switch (adev->asic_type) {
774         case CHIP_VEGA20:
775                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
776                 vega20_set_ppt_funcs(smu);
777                 break;
778         case CHIP_NAVI10:
779         case CHIP_NAVI14:
780         case CHIP_NAVI12:
781                 navi10_set_ppt_funcs(smu);
782                 break;
783         case CHIP_ARCTURUS:
784                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
785                 arcturus_set_ppt_funcs(smu);
786                 /* OD is not supported on Arcturus */
787                 smu->od_enabled = false;
788                 break;
789         case CHIP_RENOIR:
790                 renoir_set_ppt_funcs(smu);
791                 break;
792         default:
793                 return -EINVAL;
794         }
795
796         return 0;
797 }
798
799 static int smu_early_init(void *handle)
800 {
801         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
802         struct smu_context *smu = &adev->smu;
803
804         smu->adev = adev;
805         smu->pm_enabled = !!amdgpu_dpm;
806         smu->is_apu = false;
807         mutex_init(&smu->mutex);
808
809         return smu_set_funcs(adev);
810 }
811
812 static int smu_late_init(void *handle)
813 {
814         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
815         struct smu_context *smu = &adev->smu;
816
817         if (!smu->pm_enabled)
818                 return 0;
819
820         smu_handle_task(&adev->smu,
821                         smu->smu_dpm.dpm_level,
822                         AMD_PP_TASK_COMPLETE_INIT,
823                         false);
824
825         return 0;
826 }
827
828 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
829                             uint16_t *size, uint8_t *frev, uint8_t *crev,
830                             uint8_t **addr)
831 {
832         struct amdgpu_device *adev = smu->adev;
833         uint16_t data_start;
834
835         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
836                                            size, frev, crev, &data_start))
837                 return -EINVAL;
838
839         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
840
841         return 0;
842 }
843
844 static int smu_initialize_pptable(struct smu_context *smu)
845 {
846         /* TODO */
847         return 0;
848 }
849
850 static int smu_smc_table_sw_init(struct smu_context *smu)
851 {
852         int ret;
853
854         ret = smu_initialize_pptable(smu);
855         if (ret) {
856                 pr_err("Failed to init smu_initialize_pptable!\n");
857                 return ret;
858         }
859
860         /*
861          * Create the smu_table structure, and init smc tables such as
862          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
863          */
864         ret = smu_init_smc_tables(smu);
865         if (ret) {
866                 pr_err("Failed to init smc tables!\n");
867                 return ret;
868         }
869
870         /*
871          * Create the smu_power_context structure, and allocate smu_dpm_context and
872          * context size to fill the smu_power_context data.
873          */
874         ret = smu_init_power(smu);
875         if (ret) {
876                 pr_err("Failed to init smu_init_power!\n");
877                 return ret;
878         }
879
880         return 0;
881 }
882
883 static int smu_smc_table_sw_fini(struct smu_context *smu)
884 {
885         int ret;
886
887         ret = smu_fini_smc_tables(smu);
888         if (ret) {
889                 pr_err("Failed to fini smc tables!\n");
890                 return ret;
891         }
892
893         return 0;
894 }
895
896 static int smu_sw_init(void *handle)
897 {
898         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
899         struct smu_context *smu = &adev->smu;
900         int ret;
901
902         smu->pool_size = adev->pm.smu_prv_buffer_size;
903         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
904         mutex_init(&smu->smu_feature.mutex);
905         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
906         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
907         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
908
909         mutex_init(&smu->smu_baco.mutex);
910         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
911         smu->smu_baco.platform_support = false;
912
913         mutex_init(&smu->sensor_lock);
914         mutex_init(&smu->metrics_lock);
915         mutex_init(&smu->message_lock);
916
917         smu->watermarks_bitmap = 0;
918         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
919         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
920
921         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
922         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
923         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
924         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
925         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
926         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
927         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
928         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
929
930         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
931         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
932         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
933         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
934         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
935         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
936         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
937         smu->display_config = &adev->pm.pm_display_cfg;
938
939         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
940         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
941         ret = smu_init_microcode(smu);
942         if (ret) {
943                 pr_err("Failed to load smu firmware!\n");
944                 return ret;
945         }
946
947         ret = smu_smc_table_sw_init(smu);
948         if (ret) {
949                 pr_err("Failed to sw init smc table!\n");
950                 return ret;
951         }
952
953         ret = smu_register_irq_handler(smu);
954         if (ret) {
955                 pr_err("Failed to register smc irq handler!\n");
956                 return ret;
957         }
958
959         return 0;
960 }
961
962 static int smu_sw_fini(void *handle)
963 {
964         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
965         struct smu_context *smu = &adev->smu;
966         int ret;
967
968         kfree(smu->irq_source);
969         smu->irq_source = NULL;
970
971         ret = smu_smc_table_sw_fini(smu);
972         if (ret) {
973                 pr_err("Failed to sw fini smc table!\n");
974                 return ret;
975         }
976
977         ret = smu_fini_power(smu);
978         if (ret) {
979                 pr_err("Failed to fini smu power!\n");
980                 return ret;
981         }
982
983         return 0;
984 }
985
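/*
 * Allocate the VRAM backing for the SMU tables: a dedicated BO for the tool
 * (PMSTATUSLOG) table plus a single driver table BO sized to the largest of
 * the remaining SMC tables.
 */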
986 static int smu_init_fb_allocations(struct smu_context *smu)
987 {
988         struct amdgpu_device *adev = smu->adev;
989         struct smu_table_context *smu_table = &smu->smu_table;
990         struct smu_table *tables = smu_table->tables;
991         struct smu_table *driver_table = &(smu_table->driver_table);
992         uint32_t max_table_size = 0;
993         int ret, i;
994
995         /* VRAM allocation for tool table */
996         if (tables[SMU_TABLE_PMSTATUSLOG].size) {
997                 ret = amdgpu_bo_create_kernel(adev,
998                                               tables[SMU_TABLE_PMSTATUSLOG].size,
999                                               tables[SMU_TABLE_PMSTATUSLOG].align,
1000                                               tables[SMU_TABLE_PMSTATUSLOG].domain,
1001                                               &tables[SMU_TABLE_PMSTATUSLOG].bo,
1002                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1003                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1004                 if (ret) {
1005                         pr_err("VRAM allocation for tool table failed!\n");
1006                         return ret;
1007                 }
1008         }
1009
1010         /* VRAM allocation for driver table */
1011         for (i = 0; i < SMU_TABLE_COUNT; i++) {
1012                 if (tables[i].size == 0)
1013                         continue;
1014
1015                 if (i == SMU_TABLE_PMSTATUSLOG)
1016                         continue;
1017
1018                 if (max_table_size < tables[i].size)
1019                         max_table_size = tables[i].size;
1020         }
1021
1022         driver_table->size = max_table_size;
1023         driver_table->align = PAGE_SIZE;
1024         driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
1025
1026         ret = amdgpu_bo_create_kernel(adev,
1027                                       driver_table->size,
1028                                       driver_table->align,
1029                                       driver_table->domain,
1030                                       &driver_table->bo,
1031                                       &driver_table->mc_address,
1032                                       &driver_table->cpu_addr);
1033         if (ret) {
1034                 pr_err("VRAM allocation for driver table failed!\n");
1035                 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1036                         amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1037                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1038                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1039         }
1040
1041         return ret;
1042 }
1043
1044 static int smu_fini_fb_allocations(struct smu_context *smu)
1045 {
1046         struct smu_table_context *smu_table = &smu->smu_table;
1047         struct smu_table *tables = smu_table->tables;
1048         struct smu_table *driver_table = &(smu_table->driver_table);
1049
1050         if (!tables)
1051                 return 0;
1052
1053         if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1054                 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1055                                       &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1056                                       &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1057
1058         amdgpu_bo_free_kernel(&driver_table->bo,
1059                               &driver_table->mc_address,
1060                               &driver_table->cpu_addr);
1061
1062         return 0;
1063 }
1064
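/*
 * Hardware init of the SMC tables. On first initialization this reads the
 * vbios bootup values, sets up and parses the pptable, allocates the VRAM
 * backing and checks the firmware version. On every call it then (on bare
 * metal) uploads the pptable and enables the allowed SMU features, and
 * restores the display, deep sleep and overdrive related settings.
 */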
1065 static int smu_smc_table_hw_init(struct smu_context *smu,
1066                                  bool initialize)
1067 {
1068         struct amdgpu_device *adev = smu->adev;
1069         int ret;
1070
1071         if (smu_is_dpm_running(smu) && adev->in_suspend) {
1072                 pr_info("dpm has been enabled\n");
1073                 return 0;
1074         }
1075
1076         if (adev->asic_type != CHIP_ARCTURUS) {
1077                 ret = smu_init_display_count(smu, 0);
1078                 if (ret)
1079                         return ret;
1080         }
1081
1082         if (initialize) {
1083                 /* get boot_values from vbios to set revision, gfxclk, etc. */
1084                 ret = smu_get_vbios_bootup_values(smu);
1085                 if (ret)
1086                         return ret;
1087
1088                 ret = smu_setup_pptable(smu);
1089                 if (ret)
1090                         return ret;
1091
1092                 ret = smu_get_clk_info_from_vbios(smu);
1093                 if (ret)
1094                         return ret;
1095
1096                 /*
1097                  * Check that the format_revision in vbios is compatible with the
1098                  * pptable header version, and that the structure size is not 0.
1099                  */
1100                 ret = smu_check_pptable(smu);
1101                 if (ret)
1102                         return ret;
1103
1104                 /*
1105                  * allocate vram bos to store smc table contents.
1106                  */
1107                 ret = smu_init_fb_allocations(smu);
1108                 if (ret)
1109                         return ret;
1110
1111                 /*
1112                  * Parse the pptable format and fill the PPTable_t smc_pptable in the
1113                  * smu_table_context structure. Then read the smc_dpm_table from vbios
1114                  * and fill it into smc_pptable.
1115                  */
1116                 ret = smu_parse_pptable(smu);
1117                 if (ret)
1118                         return ret;
1119
1120                 /*
1121                  * Send the GetDriverIfVersion msg to check that the return value matches
1122                  * the DRIVER_IF_VERSION in the smc header.
1123                  */
1124                 ret = smu_check_fw_version(smu);
1125                 if (ret)
1126                         return ret;
1127         }
1128
1129         ret = smu_set_driver_table_location(smu);
1130         if (ret)
1131                 return ret;
1132
1133         /* smu_dump_pptable(smu); */
1134         if (!amdgpu_sriov_vf(adev)) {
1135                 /*
1136                  * Copy pptable bo in the vram to smc with SMU MSGs such as
1137                  * SetDriverDramAddr and TransferTableDram2Smu.
1138                  */
1139                 ret = smu_write_pptable(smu);
1140                 if (ret)
1141                         return ret;
1142
1143                 /* issue Run*Btc msg */
1144                 ret = smu_run_btc(smu);
1145                 if (ret)
1146                         return ret;
1147                 ret = smu_feature_set_allowed_mask(smu);
1148                 if (ret)
1149                         return ret;
1150
1151                 ret = smu_system_features_control(smu, true);
1152                 if (ret)
1153                         return ret;
1154
1155                 if (adev->asic_type == CHIP_NAVI10) {
1156                         if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
1157                                                               adev->pdev->revision == 0xc3 ||
1158                                                               adev->pdev->revision == 0xca ||
1159                                                               adev->pdev->revision == 0xcb)) ||
1160                             (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
1161                                                               adev->pdev->revision == 0xf4 ||
1162                                                               adev->pdev->revision == 0xf5 ||
1163                                                               adev->pdev->revision == 0xf6))) {
1164                                 ret = smu_disable_umc_cdr_12gbps_workaround(smu);
1165                                 if (ret) {
1166                                         pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
1167                                         return ret;
1168                                 }
1169                         }
1170                 }
1171
1172                 if (smu->ppt_funcs->set_power_source) {
1173                         /*
1174                          * For Navi1X, manually switch it to AC mode as PMFW
1175                          * may boot it with DC mode.
1176                          */
1177                         if (adev->pm.ac_power)
1178                                 ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
1179                         else
1180                                 ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
1181                         if (ret) {
1182                                 pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
1183                                 return ret;
1184                         }
1185                 }
1186         }
1187         if (adev->asic_type != CHIP_ARCTURUS) {
1188                 ret = smu_notify_display_change(smu);
1189                 if (ret)
1190                         return ret;
1191
1192                 /*
1193                  * Set the min deep sleep dcefclk with the bootup value from vbios via
1194                  * the SetMinDeepSleepDcefclk MSG.
1195                  */
1196                 ret = smu_set_min_dcef_deep_sleep(smu);
1197                 if (ret)
1198                         return ret;
1199         }
1200
1201         /*
1202          * Set initial values (read from vbios) in the dpm tables context, such as
1203          * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
1204          * type of clock.
1205          */
1206         if (initialize) {
1207                 ret = smu_populate_smc_tables(smu);
1208                 if (ret)
1209                         return ret;
1210
1211                 ret = smu_init_max_sustainable_clocks(smu);
1212                 if (ret)
1213                         return ret;
1214         }
1215
1216         if (adev->asic_type != CHIP_ARCTURUS) {
1217                 ret = smu_override_pcie_parameters(smu);
1218                 if (ret)
1219                         return ret;
1220         }
1221
1222         ret = smu_set_default_od_settings(smu, initialize);
1223         if (ret)
1224                 return ret;
1225
1226         if (initialize) {
1227                 ret = smu_populate_umd_state_clk(smu);
1228                 if (ret)
1229                         return ret;
1230
1231                 ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
1232                 if (ret)
1233                         return ret;
1234         }
1235
1236         /*
1237          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1238          */
1239         if (!amdgpu_sriov_vf(adev)) {
1240                 ret = smu_set_tool_table_location(smu);
1241         }
1242         if (!smu_is_dpm_running(smu))
1243                 pr_info("dpm has been disabled\n");
1244
1245         return ret;
1246 }
1247
1248 /**
1249  * smu_alloc_memory_pool - allocate memory pool in the system memory
1250  *
1251  * @smu: smu_context pointer
1252  *
1253  * This memory pool will be used by the SMC; the SetSystemVirtualDramAddr
1254  * and DramLogSetDramAddr messages notify the SMC of its location.
1255  *
1256  * Returns 0 on success, error on failure.
1257  */
1258 static int smu_alloc_memory_pool(struct smu_context *smu)
1259 {
1260         struct amdgpu_device *adev = smu->adev;
1261         struct smu_table_context *smu_table = &smu->smu_table;
1262         struct smu_table *memory_pool = &smu_table->memory_pool;
1263         uint64_t pool_size = smu->pool_size;
1264         int ret = 0;
1265
1266         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1267                 return ret;
1268
1269         memory_pool->size = pool_size;
1270         memory_pool->align = PAGE_SIZE;
1271         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1272
1273         switch (pool_size) {
1274         case SMU_MEMORY_POOL_SIZE_256_MB:
1275         case SMU_MEMORY_POOL_SIZE_512_MB:
1276         case SMU_MEMORY_POOL_SIZE_1_GB:
1277         case SMU_MEMORY_POOL_SIZE_2_GB:
1278                 ret = amdgpu_bo_create_kernel(adev,
1279                                               memory_pool->size,
1280                                               memory_pool->align,
1281                                               memory_pool->domain,
1282                                               &memory_pool->bo,
1283                                               &memory_pool->mc_address,
1284                                               &memory_pool->cpu_addr);
1285                 break;
1286         default:
1287                 break;
1288         }
1289
1290         return ret;
1291 }
1292
1293 static int smu_free_memory_pool(struct smu_context *smu)
1294 {
1295         struct smu_table_context *smu_table = &smu->smu_table;
1296         struct smu_table *memory_pool = &smu_table->memory_pool;
1297
1298         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1299                 return 0;
1300
1301         amdgpu_bo_free_kernel(&memory_pool->bo,
1302                               &memory_pool->mc_address,
1303                               &memory_pool->cpu_addr);
1304
1305         memset(memory_pool, 0, sizeof(struct smu_table));
1306
1307         return 0;
1308 }
1309
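/*
 * Bring up the SMC: load the SMU microcode directly when firmware loading is
 * not handled by the PSP (pre-Navi10 only), then verify that the firmware
 * reports a ready status.
 */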
1310 static int smu_start_smc_engine(struct smu_context *smu)
1311 {
1312         struct amdgpu_device *adev = smu->adev;
1313         int ret = 0;
1314
1315         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1316                 if (adev->asic_type < CHIP_NAVI10) {
1317                         if (smu->ppt_funcs->load_microcode) {
1318                                 ret = smu->ppt_funcs->load_microcode(smu);
1319                                 if (ret)
1320                                         return ret;
1321                         }
1322                 }
1323         }
1324
1325         if (smu->ppt_funcs->check_fw_status) {
1326                 ret = smu->ppt_funcs->check_fw_status(smu);
1327                 if (ret)
1328                         pr_err("SMC is not ready\n");
1329         }
1330
1331         return ret;
1332 }
1333
1334 static int smu_hw_init(void *handle)
1335 {
1336         int ret;
1337         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1338         struct smu_context *smu = &adev->smu;
1339
1340         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1341                 return 0;
1342
1343         ret = smu_start_smc_engine(smu);
1344         if (ret) {
1345                 pr_err("SMU is not ready yet!\n");
1346                 return ret;
1347         }
1348
1349         if (smu->is_apu) {
1350                 smu_powergate_sdma(&adev->smu, false);
1351                 smu_powergate_vcn(&adev->smu, false);
1352                 smu_powergate_jpeg(&adev->smu, false);
1353                 smu_set_gfx_cgpg(&adev->smu, true);
1354         }
1355
1356         if (!smu->pm_enabled)
1357                 return 0;
1358
1359         ret = smu_feature_init_dpm(smu);
1360         if (ret)
1361                 goto failed;
1362
1363         ret = smu_smc_table_hw_init(smu, true);
1364         if (ret)
1365                 goto failed;
1366
1367         ret = smu_alloc_memory_pool(smu);
1368         if (ret)
1369                 goto failed;
1370
1371         /*
1372          * The SetSystemVirtualDramAddr and DramLogSetDramAddr messages notify
1373          * the SMC of the pool location.
1374          */
1375         ret = smu_notify_memory_pool_location(smu);
1376         if (ret)
1377                 goto failed;
1378
1379         ret = smu_start_thermal_control(smu);
1380         if (ret)
1381                 goto failed;
1382
1383         ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
1384         if (ret)
1385                 goto failed;
1386
1387         adev->pm.dpm_enabled = true;
1388
1389         pr_info("SMU is initialized successfully!\n");
1390
1391         return 0;
1392
1393 failed:
1394         return ret;
1395 }
1396
1397 static int smu_stop_dpms(struct smu_context *smu)
1398 {
1399         if (amdgpu_sriov_vf(smu->adev))
1400                 return 0;
1401
1402         return smu_system_features_control(smu, false);
1403 }
1404
1405 static int smu_hw_fini(void *handle)
1406 {
1407         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1408         struct smu_context *smu = &adev->smu;
1409         struct smu_table_context *table_context = &smu->smu_table;
1410         int ret = 0;
1411
1412         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1413                 return 0;
1414
1415         if (smu->is_apu) {
1416                 smu_powergate_sdma(&adev->smu, true);
1417                 smu_powergate_vcn(&adev->smu, true);
1418                 smu_powergate_jpeg(&adev->smu, true);
1419         }
1420
1421         if (!smu->pm_enabled)
1422                 return 0;
1423
1424         adev->pm.dpm_enabled = false;
1425
1426         smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
1427
1428         if (!amdgpu_sriov_vf(adev)) {
1429                 ret = smu_stop_thermal_control(smu);
1430                 if (ret) {
1431                         pr_warn("Failed to stop thermal control!\n");
1432                         return ret;
1433                 }
1434
1435                 /*
1436                  * For custom pptable uploading, skip the DPM features
1437                  * disable process on Navi1x ASICs.
1438                  *   - As the gfx related features are under control of
1439                  *     RLC on those ASICs. RLC reinitialization will be
1440                  *     needed to reenable them. That will cost much more
1441                  *     efforts.
1442                  *
1443                  *   - SMU firmware can handle the DPM reenablement
1444                  *     properly.
1445                  */
1446                 if (!smu->uploading_custom_pp_table ||
1447                                 !((adev->asic_type >= CHIP_NAVI10) &&
1448                                         (adev->asic_type <= CHIP_NAVI12))) {
1449                         ret = smu_stop_dpms(smu);
1450                         if (ret) {
1451                                 pr_warn("Failed to stop DPMs!\n");
1452                                 return ret;
1453                         }
1454                 }
1455         }
1456
1457         kfree(table_context->driver_pptable);
1458         table_context->driver_pptable = NULL;
1459
1460         kfree(table_context->max_sustainable_clocks);
1461         table_context->max_sustainable_clocks = NULL;
1462
1463         kfree(table_context->overdrive_table);
1464         table_context->overdrive_table = NULL;
1465
1466         ret = smu_fini_fb_allocations(smu);
1467         if (ret)
1468                 return ret;
1469
1470         ret = smu_free_memory_pool(smu);
1471         if (ret)
1472                 return ret;
1473
1474         return 0;
1475 }
1476
1477 int smu_reset(struct smu_context *smu)
1478 {
1479         struct amdgpu_device *adev = smu->adev;
1480         int ret = 0;
1481
1482         ret = smu_hw_fini(adev);
1483         if (ret)
1484                 return ret;
1485
1486         ret = smu_hw_init(adev);
1487         if (ret)
1488                 return ret;
1489
1490         return ret;
1491 }
1492
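/*
 * Disable the SMU features for suspend / reset. When the device is going
 * through a BACO based reset or runtime suspend, the BACO feature has to be
 * re-enabled afterwards (skipped on Arcturus with newer firmware, where this
 * is handled by the firmware itself).
 */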
1493 static int smu_disable_dpm(struct smu_context *smu)
1494 {
1495         struct amdgpu_device *adev = smu->adev;
1496         uint32_t smu_version;
1497         int ret = 0;
1498         bool use_baco = !smu->is_apu &&
1499                 ((adev->in_gpu_reset &&
1500                   (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1501                  ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
1502
1503         ret = smu_get_smc_version(smu, NULL, &smu_version);
1504         if (ret) {
1505                 pr_err("Failed to get smu version.\n");
1506                 return ret;
1507         }
1508
1509         /*
1510          * Disable all enabled SMU features.
1511          * This should be handled in SMU FW; as a backup,
1512          * the driver issues the call here until the sequence
1513          * in SMU FW is operational.
1514          */
1515         ret = smu_system_features_control(smu, false);
1516         if (ret) {
1517                 pr_err("Failed to disable smu features.\n");
1518                 return ret;
1519         }
1520
1521         /*
1522          * Arcturus does not have BACO bit in disable feature mask.
1523          * Enablement of BACO bit on Arcturus should be skipped.
1524          */
1525         if (adev->asic_type == CHIP_ARCTURUS) {
1526                 if (use_baco && (smu_version > 0x360e00))
1527                         return 0;
1528         }
1529
1530         /* For BACO, the BACO feature needs to be left enabled */
1531         if (use_baco) {
1532                 /*
1533                  * Check whether SMU_FEATURE_BACO_BIT is supported,
1534                  * rather than whether it is currently enabled.
1535                  *
1536                  * 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' would always
1537                  * return false here, since 'smu_system_features_control(smu, false)'
1538                  * was just issued above and disabled all SMU features.
1539                  *
1540                  * Thus 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' is used
1541                  * for the check instead.
1542                  */
1543                 if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
1544                         ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1545                         if (ret) {
1546                                 pr_warn("set BACO feature enabled failed, return %d\n", ret);
1547                                 return ret;
1548                         }
1549                 }
1550         }
1551
1552         return ret;
1553 }
1554
1555 static int smu_suspend(void *handle)
1556 {
1557         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1558         struct smu_context *smu = &adev->smu;
1559         int ret;
1560
1561         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1562                 return 0;
1563
1564         if (!smu->pm_enabled)
1565                 return 0;
1566
1567         adev->pm.dpm_enabled = false;
1568
1569         smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
1570
1571         if (!amdgpu_sriov_vf(adev)) {
1572                 ret = smu_disable_dpm(smu);
1573                 if (ret)
1574                         return ret;
1575         }
1576
1577         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1578
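             /* As part of suspend, stop the RLC on CHIP_NAVI10 and later ASICs. */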
1579         if (adev->asic_type >= CHIP_NAVI10 &&
1580             adev->gfx.rlc.funcs->stop)
1581                 adev->gfx.rlc.funcs->stop(adev);
1582         if (smu->is_apu)
1583                 smu_set_gfx_cgpg(&adev->smu, false);
1584
1585         return 0;
1586 }
1587
1588 static int smu_resume(void *handle)
1589 {
1590         int ret;
1591         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1592         struct smu_context *smu = &adev->smu;
1593
1594         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1595                 return 0;
1596
1597         if (!smu->pm_enabled)
1598                 return 0;
1599
1600         pr_info("SMU is resuming...\n");
1601
1602         ret = smu_start_smc_engine(smu);
1603         if (ret) {
1604                 pr_err("SMU is not ready yet!\n");
1605                 goto failed;
1606         }
1607
1608         ret = smu_smc_table_hw_init(smu, false);
1609         if (ret)
1610                 goto failed;
1611
1612         ret = smu_start_thermal_control(smu);
1613         if (ret)
1614                 goto failed;
1615
1616         ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
1617         if (ret)
1618                 goto failed;
1619
1620         if (smu->is_apu)
1621                 smu_set_gfx_cgpg(&adev->smu, true);
1622
1623         smu->disable_uclk_switch = 0;
1624
1625         adev->pm.dpm_enabled = true;
1626
1627         pr_info("SMU is resumed successfully!\n");
1628
1629         return 0;
1630
1631 failed:
1632         return ret;
1633 }
1634
1635 int smu_display_configuration_change(struct smu_context *smu,
1636                                      const struct amd_pp_display_configuration *display_config)
1637 {
1638         struct amdgpu_device *adev = smu->adev;
1639         int index = 0;
1640         int num_of_active_display = 0;
1641
1642         if (!adev->pm.dpm_enabled)
1643                 return -EINVAL;
1644
1645         if (!is_support_sw_smu(smu->adev))
1646                 return -EINVAL;
1647
1648         if (!display_config)
1649                 return -EINVAL;
1650
1651         mutex_lock(&smu->mutex);
1652
1653         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
1654                 smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
1655                                 display_config->min_dcef_deep_sleep_set_clk / 100);
1656
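             /* Count the paths that are actually driving a display (controller_id != 0). */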
1657         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1658                 if (display_config->displays[index].controller_id != 0)
1659                         num_of_active_display++;
1660         }
1661
1662         smu_set_active_display_count(smu, num_of_active_display);
1663
1664         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1665                            display_config->cpu_cc6_disable,
1666                            display_config->cpu_pstate_disable,
1667                            display_config->nb_pstate_switch_disable);
1668
1669         mutex_unlock(&smu->mutex);
1670
1671         return 0;
1672 }
1673
1674 static int smu_get_clock_info(struct smu_context *smu,
1675                               struct smu_clock_info *clk_info,
1676                               enum smu_perf_level_designation designation)
1677 {
1678         int ret;
1679         struct smu_performance_level level = {0};
1680
1681         if (!clk_info)
1682                 return -EINVAL;
1683
1684         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1685         if (ret)
1686                 return -EINVAL;
1687
1688         clk_info->min_mem_clk = level.memory_clock;
1689         clk_info->min_eng_clk = level.core_clock;
1690         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1691
1692         ret = smu_get_perf_level(smu, designation, &level);
1693         if (ret)
1694                 return -EINVAL;
1695
1696         clk_info->min_mem_clk = level.memory_clock;
1697         clk_info->min_eng_clk = level.core_clock;
1698         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1699
1700         return 0;
1701 }
1702
1703 int smu_get_current_clocks(struct smu_context *smu,
1704                            struct amd_pp_clock_info *clocks)
1705 {
1706         struct amd_pp_simple_clock_info simple_clocks = {0};
1707         struct amdgpu_device *adev = smu->adev;
1708         struct smu_clock_info hw_clocks;
1709         int ret = 0;
1710
1711         if (!is_support_sw_smu(smu->adev))
1712                 return -EINVAL;
1713
1714         if (!adev->pm.dpm_enabled)
1715                 return -EINVAL;
1716
1717         mutex_lock(&smu->mutex);
1718
1719         smu_get_dal_power_level(smu, &simple_clocks);
1720
1721         if (smu->support_power_containment)
1722                 ret = smu_get_clock_info(smu, &hw_clocks,
1723                                          PERF_LEVEL_POWER_CONTAINMENT);
1724         else
1725                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1726
1727         if (ret) {
1728                 pr_err("Error in smu_get_clock_info\n");
1729                 goto failed;
1730         }
1731
1732         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1733         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1734         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1735         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1736         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1737         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1738         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1739         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1740
1741         if (simple_clocks.level == 0)
1742                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1743         else
1744                 clocks->max_clocks_state = simple_clocks.level;
1745
1746         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1747                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1748                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1749         }
1750
1751 failed:
1752         mutex_unlock(&smu->mutex);
1753         return ret;
1754 }
1755
1756 static int smu_set_clockgating_state(void *handle,
1757                                      enum amd_clockgating_state state)
1758 {
1759         return 0;
1760 }
1761
1762 static int smu_set_powergating_state(void *handle,
1763                                      enum amd_powergating_state state)
1764 {
1765         return 0;
1766 }
1767
1768 static int smu_enable_umd_pstate(void *handle,
1769                       enum amd_dpm_forced_level *level)
1770 {
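             /*
              * UMD pstate: a fixed performance level requested by user-mode
              * drivers (e.g. for profiling). Entering it disables GFX clock
              * and power gating; leaving it re-enables them.
              */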
1771         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1772                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1773                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1774                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1775
1776         struct smu_context *smu = (struct smu_context*)(handle);
1777         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1778
1779         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1780                 return -EINVAL;
1781
1782         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1783                 /* enter UMD pstate, save current level, disable gfx cg */
1784                 if (*level & profile_mode_mask) {
1785                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1786                         smu_dpm_ctx->enable_umd_pstate = true;
1787                         amdgpu_device_ip_set_powergating_state(smu->adev,
1788                                                                AMD_IP_BLOCK_TYPE_GFX,
1789                                                                AMD_PG_STATE_UNGATE);
1790                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1791                                                                AMD_IP_BLOCK_TYPE_GFX,
1792                                                                AMD_CG_STATE_UNGATE);
1793                 }
1794         } else {
1795                 /* exit UMD pstate, restore level, enable gfx cg */
1796                 if (!(*level & profile_mode_mask)) {
1797                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1798                                 *level = smu_dpm_ctx->saved_dpm_level;
1799                         smu_dpm_ctx->enable_umd_pstate = false;
1800                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1801                                                                AMD_IP_BLOCK_TYPE_GFX,
1802                                                                AMD_CG_STATE_GATE);
1803                         amdgpu_device_ip_set_powergating_state(smu->adev,
1804                                                                AMD_IP_BLOCK_TYPE_GFX,
1805                                                                AMD_PG_STATE_GATE);
1806                 }
1807         }
1808
1809         return 0;
1810 }
1811
1812 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1813                                    enum amd_dpm_forced_level level,
1814                                    bool skip_display_settings)
1815 {
1816         int ret = 0;
1817         int index = 0;
1818         long workload;
1819         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1820
1821         if (!skip_display_settings) {
1822                 ret = smu_display_config_changed(smu);
1823                 if (ret) {
1824                         pr_err("Failed to change display config!\n");
1825                         return ret;
1826                 }
1827         }
1828
1829         ret = smu_apply_clocks_adjust_rules(smu);
1830         if (ret) {
1831                 pr_err("Failed to apply clocks adjust rules!\n");
1832                 return ret;
1833         }
1834
1835         if (!skip_display_settings) {
1836                 ret = smu_notify_smc_display_config(smu);
1837                 if (ret) {
1838                         pr_err("Failed to notify smc display config!\n");
1839                         return ret;
1840                 }
1841         }
1842
1843         if (smu_dpm_ctx->dpm_level != level) {
1844                 ret = smu_asic_set_performance_level(smu, level);
1845                 if (ret) {
1846                         pr_err("Failed to set performance level!\n");
1847                         return ret;
1848                 }
1849
1850                 /* update the saved copy */
1851                 smu_dpm_ctx->dpm_level = level;
1852         }
1853
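             /*
              * Outside of manual DPM mode, re-apply the highest-priority
              * workload still requested: fls() returns the 1-based index of
              * the most significant bit set in workload_mask.
              */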
1854         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1855                 index = fls(smu->workload_mask);
1856                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1857                 workload = smu->workload_setting[index];
1858
1859                 if (smu->power_profile_mode != workload)
1860                         smu_set_power_profile_mode(smu, &workload, 0, false);
1861         }
1862
1863         return ret;
1864 }
1865
1866 int smu_handle_task(struct smu_context *smu,
1867                     enum amd_dpm_forced_level level,
1868                     enum amd_pp_task task_id,
1869                     bool lock_needed)
1870 {
1871         struct amdgpu_device *adev = smu->adev;
1872         int ret = 0;
1873
1874         if (!adev->pm.dpm_enabled)
1875                 return -EINVAL;
1876
1877         if (lock_needed)
1878                 mutex_lock(&smu->mutex);
1879
1880         switch (task_id) {
1881         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1882                 ret = smu_pre_display_config_changed(smu);
1883                 if (ret)
1884                         goto out;
1885                 ret = smu_set_cpu_power_state(smu);
1886                 if (ret)
1887                         goto out;
1888                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1889                 break;
1890         case AMD_PP_TASK_COMPLETE_INIT:
1891         case AMD_PP_TASK_READJUST_POWER_STATE:
1892                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1893                 break;
1894         default:
1895                 break;
1896         }
1897
1898 out:
1899         if (lock_needed)
1900                 mutex_unlock(&smu->mutex);
1901
1902         return ret;
1903 }
1904
1905 int smu_switch_power_profile(struct smu_context *smu,
1906                              enum PP_SMC_POWER_PROFILE type,
1907                              bool en)
1908 {
1909         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1910         struct amdgpu_device *adev = smu->adev;
1911         long workload;
1912         uint32_t index;
1913
1914         if (!adev->pm.dpm_enabled)
1915                 return -EINVAL;
1916
1917         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1918                 return -EINVAL;
1919
1920         mutex_lock(&smu->mutex);
1921
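             /*
              * Update the workload request mask for this profile type and
              * pick the highest-priority workload that remains requested.
              */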
1922         if (!en) {
1923                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1924                 index = fls(smu->workload_mask);
1925                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1926                 workload = smu->workload_setting[index];
1927         } else {
1928                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1929                 index = fls(smu->workload_mask);
1930                 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1931                 workload = smu->workload_setting[index];
1932         }
1933
1934         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1935                 smu_set_power_profile_mode(smu, &workload, 0, false);
1936
1937         mutex_unlock(&smu->mutex);
1938
1939         return 0;
1940 }
1941
1942 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1943 {
1944         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1945         struct amdgpu_device *adev = smu->adev;
1946         enum amd_dpm_forced_level level;
1947
1948         if (!adev->pm.dpm_enabled)
1949                 return -EINVAL;
1950
1951         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1952                 return -EINVAL;
1953
1954         mutex_lock(&(smu->mutex));
1955         level = smu_dpm_ctx->dpm_level;
1956         mutex_unlock(&(smu->mutex));
1957
1958         return level;
1959 }
1960
1961 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1962 {
1963         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1964         struct amdgpu_device *adev = smu->adev;
1965         int ret = 0;
1966
1967         if (!adev->pm.dpm_enabled)
1968                 return -EINVAL;
1969
1970         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1971                 return -EINVAL;
1972
1973         mutex_lock(&smu->mutex);
1974
1975         ret = smu_enable_umd_pstate(smu, &level);
1976         if (ret) {
1977                 mutex_unlock(&smu->mutex);
1978                 return ret;
1979         }
1980
1981         ret = smu_handle_task(smu, level,
1982                               AMD_PP_TASK_READJUST_POWER_STATE,
1983                               false);
1984
1985         mutex_unlock(&smu->mutex);
1986
1987         return ret;
1988 }
1989
1990 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1991 {
1992         struct amdgpu_device *adev = smu->adev;
1993         int ret = 0;
1994
1995         if (!adev->pm.dpm_enabled)
1996                 return -EINVAL;
1997
1998         mutex_lock(&smu->mutex);
1999         ret = smu_init_display_count(smu, count);
2000         mutex_unlock(&smu->mutex);
2001
2002         return ret;
2003 }
2004
2005 int smu_force_clk_levels(struct smu_context *smu,
2006                          enum smu_clk_type clk_type,
2007                          uint32_t mask,
2008                          bool lock_needed)
2009 {
2010         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2011         struct amdgpu_device *adev = smu->adev;
2012         int ret = 0;
2013
2014         if (!adev->pm.dpm_enabled)
2015                 return -EINVAL;
2016
2017         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2018                 pr_debug("force clock level is for dpm manual mode only.\n");
2019                 return -EINVAL;
2020         }
2021
2022         if (lock_needed)
2023                 mutex_lock(&smu->mutex);
2024
2025         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
2026                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2027
2028         if (lock_needed)
2029                 mutex_unlock(&smu->mutex);
2030
2031         return ret;
2032 }
2033
2034 /*
2035  * On system suspend or reset, the dpm_enabled
2036  * flag is cleared so that SMU services which are
2037  * no longer supported get gated.
2038  * However, setting the MP1 state should still be
2039  * allowed even with dpm_enabled cleared.
2040  */
2041 int smu_set_mp1_state(struct smu_context *smu,
2042                       enum pp_mp1_state mp1_state)
2043 {
2044         uint16_t msg;
2045         int ret;
2046
2047         mutex_lock(&smu->mutex);
2048
2049         switch (mp1_state) {
2050         case PP_MP1_STATE_SHUTDOWN:
2051                 msg = SMU_MSG_PrepareMp1ForShutdown;
2052                 break;
2053         case PP_MP1_STATE_UNLOAD:
2054                 msg = SMU_MSG_PrepareMp1ForUnload;
2055                 break;
2056         case PP_MP1_STATE_RESET:
2057                 msg = SMU_MSG_PrepareMp1ForReset;
2058                 break;
2059         case PP_MP1_STATE_NONE:
2060         default:
2061                 mutex_unlock(&smu->mutex);
2062                 return 0;
2063         }
2064
2065         /* some ASICs may not support these messages */
2066         if (smu_msg_get_index(smu, msg) < 0) {
2067                 mutex_unlock(&smu->mutex);
2068                 return 0;
2069         }
2070
2071         ret = smu_send_smc_msg(smu, msg, NULL);
2072         if (ret)
2073                 pr_err("[PrepareMp1] Failed!\n");
2074
2075         mutex_unlock(&smu->mutex);
2076
2077         return ret;
2078 }
2079
2080 int smu_set_df_cstate(struct smu_context *smu,
2081                       enum pp_df_cstate state)
2082 {
2083         struct amdgpu_device *adev = smu->adev;
2084         int ret = 0;
2085
2086         if (!adev->pm.dpm_enabled)
2087                 return -EINVAL;
2088
2089         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2090                 return 0;
2091
2092         mutex_lock(&smu->mutex);
2093
2094         ret = smu->ppt_funcs->set_df_cstate(smu, state);
2095         if (ret)
2096                 pr_err("[SetDfCstate] failed!\n");
2097
2098         mutex_unlock(&smu->mutex);
2099
2100         return ret;
2101 }
2102
2103 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
2104 {
2105         struct amdgpu_device *adev = smu->adev;
2106         int ret = 0;
2107
2108         if (!adev->pm.dpm_enabled)
2109                 return -EINVAL;
2110
2111         if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
2112                 return 0;
2113
2114         mutex_lock(&smu->mutex);
2115
2116         ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
2117         if (ret)
2118                 pr_err("[AllowXgmiPowerDown] failed!\n");
2119
2120         mutex_unlock(&smu->mutex);
2121
2122         return ret;
2123 }
2124
2125 int smu_write_watermarks_table(struct smu_context *smu)
2126 {
2127         void *watermarks_table = smu->smu_table.watermarks_table;
2128
2129         if (!watermarks_table)
2130                 return -EINVAL;
2131
2132         return smu_update_table(smu,
2133                                 SMU_TABLE_WATERMARKS,
2134                                 0,
2135                                 watermarks_table,
2136                                 true);
2137 }
2138
2139 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
2140                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
2141 {
2142         void *table = smu->smu_table.watermarks_table;
2143         struct amdgpu_device *adev = smu->adev;
2144
2145         if (!adev->pm.dpm_enabled)
2146                 return -EINVAL;
2147
2148         if (!table)
2149                 return -EINVAL;
2150
2151         mutex_lock(&smu->mutex);
2152
2153         if (!smu->disable_watermark &&
2154                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
2155                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
2156                 smu_set_watermarks_table(smu, table, clock_ranges);
2157
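                     /* Mark the watermarks as present but not yet uploaded to the SMU. */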
2158                 if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
2159                         smu->watermarks_bitmap |= WATERMARKS_EXIST;
2160                         smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
2161                 }
2162         }
2163
2164         mutex_unlock(&smu->mutex);
2165
2166         return 0;
2167 }
2168
2169 int smu_set_ac_dc(struct smu_context *smu)
2170 {
2171         struct amdgpu_device *adev = smu->adev;
2172         int ret = 0;
2173
2174         if (!adev->pm.dpm_enabled)
2175                 return -EINVAL;
2176
2177         /* controlled by firmware */
2178         if (smu->dc_controlled_by_gpio)
2179                 return 0;
2180
2181         mutex_lock(&smu->mutex);
2182         if (smu->ppt_funcs->set_power_source) {
2183                 if (smu->adev->pm.ac_power)
2184                         ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
2185                 else
2186                         ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
2187                 if (ret)
2188                         pr_err("Failed to switch to %s mode!\n",
2189                                smu->adev->pm.ac_power ? "AC" : "DC");
2190         }
2191         mutex_unlock(&smu->mutex);
2192
2193         return ret;
2194 }
2195
2196 const struct amd_ip_funcs smu_ip_funcs = {
2197         .name = "smu",
2198         .early_init = smu_early_init,
2199         .late_init = smu_late_init,
2200         .sw_init = smu_sw_init,
2201         .sw_fini = smu_sw_fini,
2202         .hw_init = smu_hw_init,
2203         .hw_fini = smu_hw_fini,
2204         .suspend = smu_suspend,
2205         .resume = smu_resume,
2206         .is_idle = NULL,
2207         .check_soft_reset = NULL,
2208         .wait_for_idle = NULL,
2209         .soft_reset = NULL,
2210         .set_clockgating_state = smu_set_clockgating_state,
2211         .set_powergating_state = smu_set_powergating_state,
2212         .enable_umd_pstate = smu_enable_umd_pstate,
2213 };
2214
2215 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2216 {
2217         .type = AMD_IP_BLOCK_TYPE_SMC,
2218         .major = 11,
2219         .minor = 0,
2220         .rev = 0,
2221         .funcs = &smu_ip_funcs,
2222 };
2223
2224 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2225 {
2226         .type = AMD_IP_BLOCK_TYPE_SMC,
2227         .major = 12,
2228         .minor = 0,
2229         .rev = 0,
2230         .funcs = &smu_ip_funcs,
2231 };
2232
2233 int smu_load_microcode(struct smu_context *smu)
2234 {
2235         struct amdgpu_device *adev = smu->adev;
2236         int ret = 0;
2237
2238         if (!adev->pm.dpm_enabled)
2239                 return -EINVAL;
2240
2241         mutex_lock(&smu->mutex);
2242
2243         if (smu->ppt_funcs->load_microcode)
2244                 ret = smu->ppt_funcs->load_microcode(smu);
2245
2246         mutex_unlock(&smu->mutex);
2247
2248         return ret;
2249 }
2250
2251 int smu_check_fw_status(struct smu_context *smu)
2252 {
2253         struct amdgpu_device *adev = smu->adev;
2254         int ret = 0;
2255
2256         if (!adev->pm.dpm_enabled)
2257                 return -EINVAL;
2258
2259         mutex_lock(&smu->mutex);
2260
2261         if (smu->ppt_funcs->check_fw_status)
2262                 ret = smu->ppt_funcs->check_fw_status(smu);
2263
2264         mutex_unlock(&smu->mutex);
2265
2266         return ret;
2267 }
2268
2269 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2270 {
2271         int ret = 0;
2272
2273         mutex_lock(&smu->mutex);
2274
2275         if (smu->ppt_funcs->set_gfx_cgpg)
2276                 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2277
2278         mutex_unlock(&smu->mutex);
2279
2280         return ret;
2281 }
2282
2283 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
2284 {
2285         struct amdgpu_device *adev = smu->adev;
2286         int ret = 0;
2287
2288         if (!adev->pm.dpm_enabled)
2289                 return -EINVAL;
2290
2291         mutex_lock(&smu->mutex);
2292
2293         if (smu->ppt_funcs->set_fan_speed_rpm)
2294                 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2295
2296         mutex_unlock(&smu->mutex);
2297
2298         return ret;
2299 }
2300
2301 int smu_get_power_limit(struct smu_context *smu,
2302                         uint32_t *limit,
2303                         bool def,
2304                         bool lock_needed)
2305 {
2306         struct amdgpu_device *adev = smu->adev;
2307         int ret = 0;
2308
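             /*
              * When lock_needed is false the caller is assumed to already
              * hold smu->mutex, so both the dpm_enabled check and the
              * locking are skipped here.
              */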
2309         if (lock_needed) {
2310                 if (!adev->pm.dpm_enabled)
2311                         return -EINVAL;
2312
2313                 mutex_lock(&smu->mutex);
2314         }
2315
2316         if (smu->ppt_funcs->get_power_limit)
2317                 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2318
2319         if (lock_needed)
2320                 mutex_unlock(&smu->mutex);
2321
2322         return ret;
2323 }
2324
2325 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2326 {
2327         struct amdgpu_device *adev = smu->adev;
2328         int ret = 0;
2329
2330         if (!adev->pm.dpm_enabled)
2331                 return -EINVAL;
2332
2333         mutex_lock(&smu->mutex);
2334
2335         if (smu->ppt_funcs->set_power_limit)
2336                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2337
2338         mutex_unlock(&smu->mutex);
2339
2340         return ret;
2341 }
2342
2343 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2344 {
2345         struct amdgpu_device *adev = smu->adev;
2346         int ret = 0;
2347
2348         if (!adev->pm.dpm_enabled)
2349                 return -EINVAL;
2350
2351         mutex_lock(&smu->mutex);
2352
2353         if (smu->ppt_funcs->print_clk_levels)
2354                 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2355
2356         mutex_unlock(&smu->mutex);
2357
2358         return ret;
2359 }
2360
2361 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2362 {
2363         struct amdgpu_device *adev = smu->adev;
2364         int ret = 0;
2365
2366         if (!adev->pm.dpm_enabled)
2367                 return -EINVAL;
2368
2369         mutex_lock(&smu->mutex);
2370
2371         if (smu->ppt_funcs->get_od_percentage)
2372                 ret = smu->ppt_funcs->get_od_percentage(smu, type);
2373
2374         mutex_unlock(&smu->mutex);
2375
2376         return ret;
2377 }
2378
2379 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2380 {
2381         struct amdgpu_device *adev = smu->adev;
2382         int ret = 0;
2383
2384         if (!adev->pm.dpm_enabled)
2385                 return -EINVAL;
2386
2387         mutex_lock(&smu->mutex);
2388
2389         if (smu->ppt_funcs->set_od_percentage)
2390                 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2391
2392         mutex_unlock(&smu->mutex);
2393
2394         return ret;
2395 }
2396
2397 int smu_od_edit_dpm_table(struct smu_context *smu,
2398                           enum PP_OD_DPM_TABLE_COMMAND type,
2399                           long *input, uint32_t size)
2400 {
2401         struct amdgpu_device *adev = smu->adev;
2402         int ret = 0;
2403
2404         if (!adev->pm.dpm_enabled)
2405                 return -EINVAL;
2406
2407         mutex_lock(&smu->mutex);
2408
2409         if (smu->ppt_funcs->od_edit_dpm_table)
2410                 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2411
2412         mutex_unlock(&smu->mutex);
2413
2414         return ret;
2415 }
2416
2417 int smu_read_sensor(struct smu_context *smu,
2418                     enum amd_pp_sensors sensor,
2419                     void *data, uint32_t *size)
2420 {
2421         struct amdgpu_device *adev = smu->adev;
2422         int ret = 0;
2423
2424         if (!adev->pm.dpm_enabled)
2425                 return -EINVAL;
2426
2427         mutex_lock(&smu->mutex);
2428
2429         if (smu->ppt_funcs->read_sensor)
2430                 ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2431
2432         mutex_unlock(&smu->mutex);
2433
2434         return ret;
2435 }
2436
2437 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2438 {
2439         struct amdgpu_device *adev = smu->adev;
2440         int ret = 0;
2441
2442         if (!adev->pm.dpm_enabled)
2443                 return -EINVAL;
2444
2445         mutex_lock(&smu->mutex);
2446
2447         if (smu->ppt_funcs->get_power_profile_mode)
2448                 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2449
2450         mutex_unlock(&smu->mutex);
2451
2452         return ret;
2453 }
2454
2455 int smu_set_power_profile_mode(struct smu_context *smu,
2456                                long *param,
2457                                uint32_t param_size,
2458                                bool lock_needed)
2459 {
2460         struct amdgpu_device *adev = smu->adev;
2461         int ret = 0;
2462
2463         if (!adev->pm.dpm_enabled)
2464                 return -EINVAL;
2465
2466         if (lock_needed)
2467                 mutex_lock(&smu->mutex);
2468
2469         if (smu->ppt_funcs->set_power_profile_mode)
2470                 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2471
2472         if (lock_needed)
2473                 mutex_unlock(&smu->mutex);
2474
2475         return ret;
2476 }
2477
2478
2479 int smu_get_fan_control_mode(struct smu_context *smu)
2480 {
2481         struct amdgpu_device *adev = smu->adev;
2482         int ret = 0;
2483
2484         if (!adev->pm.dpm_enabled)
2485                 return -EINVAL;
2486
2487         mutex_lock(&smu->mutex);
2488
2489         if (smu->ppt_funcs->get_fan_control_mode)
2490                 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2491
2492         mutex_unlock(&smu->mutex);
2493
2494         return ret;
2495 }
2496
2497 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2498 {
2499         struct amdgpu_device *adev = smu->adev;
2500         int ret = 0;
2501
2502         if (!adev->pm.dpm_enabled)
2503                 return -EINVAL;
2504
2505         mutex_lock(&smu->mutex);
2506
2507         if (smu->ppt_funcs->set_fan_control_mode)
2508                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2509
2510         mutex_unlock(&smu->mutex);
2511
2512         return ret;
2513 }
2514
2515 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2516 {
2517         struct amdgpu_device *adev = smu->adev;
2518         int ret = 0;
2519
2520         if (!adev->pm.dpm_enabled)
2521                 return -EINVAL;
2522
2523         mutex_lock(&smu->mutex);
2524
2525         if (smu->ppt_funcs->get_fan_speed_percent)
2526                 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2527
2528         mutex_unlock(&smu->mutex);
2529
2530         return ret;
2531 }
2532
2533 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2534 {
2535         struct amdgpu_device *adev = smu->adev;
2536         int ret = 0;
2537
2538         if (!adev->pm.dpm_enabled)
2539                 return -EINVAL;
2540
2541         mutex_lock(&smu->mutex);
2542
2543         if (smu->ppt_funcs->set_fan_speed_percent)
2544                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2545
2546         mutex_unlock(&smu->mutex);
2547
2548         return ret;
2549 }
2550
2551 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2552 {
2553         struct amdgpu_device *adev = smu->adev;
2554         int ret = 0;
2555
2556         if (!adev->pm.dpm_enabled)
2557                 return -EINVAL;
2558
2559         mutex_lock(&smu->mutex);
2560
2561         if (smu->ppt_funcs->get_fan_speed_rpm)
2562                 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2563
2564         mutex_unlock(&smu->mutex);
2565
2566         return ret;
2567 }
2568
2569 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2570 {
2571         struct amdgpu_device *adev = smu->adev;
2572         int ret = 0;
2573
2574         if (!adev->pm.dpm_enabled)
2575                 return -EINVAL;
2576
2577         mutex_lock(&smu->mutex);
2578
2579         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
2580                 ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
2581
2582         mutex_unlock(&smu->mutex);
2583
2584         return ret;
2585 }
2586
2587 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2588 {
2589         struct amdgpu_device *adev = smu->adev;
2590         int ret = 0;
2591
2592         if (!adev->pm.dpm_enabled)
2593                 return -EINVAL;
2594
2595         if (smu->ppt_funcs->set_active_display_count)
2596                 ret = smu->ppt_funcs->set_active_display_count(smu, count);
2597
2598         return ret;
2599 }
2600
2601 int smu_get_clock_by_type(struct smu_context *smu,
2602                           enum amd_pp_clock_type type,
2603                           struct amd_pp_clocks *clocks)
2604 {
2605         struct amdgpu_device *adev = smu->adev;
2606         int ret = 0;
2607
2608         if (!adev->pm.dpm_enabled)
2609                 return -EINVAL;
2610
2611         mutex_lock(&smu->mutex);
2612
2613         if (smu->ppt_funcs->get_clock_by_type)
2614                 ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2615
2616         mutex_unlock(&smu->mutex);
2617
2618         return ret;
2619 }
2620
2621 int smu_get_max_high_clocks(struct smu_context *smu,
2622                             struct amd_pp_simple_clock_info *clocks)
2623 {
2624         struct amdgpu_device *adev = smu->adev;
2625         int ret = 0;
2626
2627         if (!adev->pm.dpm_enabled)
2628                 return -EINVAL;
2629
2630         mutex_lock(&smu->mutex);
2631
2632         if (smu->ppt_funcs->get_max_high_clocks)
2633                 ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2634
2635         mutex_unlock(&smu->mutex);
2636
2637         return ret;
2638 }
2639
2640 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2641                                        enum smu_clk_type clk_type,
2642                                        struct pp_clock_levels_with_latency *clocks)
2643 {
2644         struct amdgpu_device *adev = smu->adev;
2645         int ret = 0;
2646
2647         if (!adev->pm.dpm_enabled)
2648                 return -EINVAL;
2649
2650         mutex_lock(&smu->mutex);
2651
2652         if (smu->ppt_funcs->get_clock_by_type_with_latency)
2653                 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2654
2655         mutex_unlock(&smu->mutex);
2656
2657         return ret;
2658 }
2659
2660 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2661                                        enum amd_pp_clock_type type,
2662                                        struct pp_clock_levels_with_voltage *clocks)
2663 {
2664         struct amdgpu_device *adev = smu->adev;
2665         int ret = 0;
2666
2667         if (!adev->pm.dpm_enabled)
2668                 return -EINVAL;
2669
2670         mutex_lock(&smu->mutex);
2671
2672         if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2673                 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2674
2675         mutex_unlock(&smu->mutex);
2676
2677         return ret;
2678 }
2679
2680
2681 int smu_display_clock_voltage_request(struct smu_context *smu,
2682                                       struct pp_display_clock_request *clock_req)
2683 {
2684         struct amdgpu_device *adev = smu->adev;
2685         int ret = 0;
2686
2687         if (!adev->pm.dpm_enabled)
2688                 return -EINVAL;
2689
2690         mutex_lock(&smu->mutex);
2691
2692         if (smu->ppt_funcs->display_clock_voltage_request)
2693                 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2694
2695         mutex_unlock(&smu->mutex);
2696
2697         return ret;
2698 }
2699
2700
2701 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2702 {
2703         struct amdgpu_device *adev = smu->adev;
2704         int ret = -EINVAL;
2705
2706         if (!adev->pm.dpm_enabled)
2707                 return -EINVAL;
2708
2709         mutex_lock(&smu->mutex);
2710
2711         if (smu->ppt_funcs->display_disable_memory_clock_switch)
2712                 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2713
2714         mutex_unlock(&smu->mutex);
2715
2716         return ret;
2717 }
2718
2719 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2720 {
2721         struct amdgpu_device *adev = smu->adev;
2722         int ret = 0;
2723
2724         if (!adev->pm.dpm_enabled)
2725                 return -EINVAL;
2726
2727         mutex_lock(&smu->mutex);
2728
2729         if (smu->ppt_funcs->notify_smu_enable_pwe)
2730                 ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2731
2732         mutex_unlock(&smu->mutex);
2733
2734         return ret;
2735 }
2736
2737 int smu_set_xgmi_pstate(struct smu_context *smu,
2738                         uint32_t pstate)
2739 {
2740         struct amdgpu_device *adev = smu->adev;
2741         int ret = 0;
2742
2743         if (!adev->pm.dpm_enabled)
2744                 return -EINVAL;
2745
2746         mutex_lock(&smu->mutex);
2747
2748         if (smu->ppt_funcs->set_xgmi_pstate)
2749                 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2750
2751         mutex_unlock(&smu->mutex);
2752
2753         return ret;
2754 }
2755
2756 int smu_set_azalia_d3_pme(struct smu_context *smu)
2757 {
2758         struct amdgpu_device *adev = smu->adev;
2759         int ret = 0;
2760
2761         if (!adev->pm.dpm_enabled)
2762                 return -EINVAL;
2763
2764         mutex_lock(&smu->mutex);
2765
2766         if (smu->ppt_funcs->set_azalia_d3_pme)
2767                 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2768
2769         mutex_unlock(&smu->mutex);
2770
2771         return ret;
2772 }
2773
2774 /*
2775  * On system suspend or reset, the dpm_enabled
2776  * flag is cleared so that SMU services which are
2777  * no longer supported get gated.
2778  *
2779  * However, BACO and mode1 reset should still be granted
2780  * as they remain supported and necessary.
2781  */
2782 bool smu_baco_is_support(struct smu_context *smu)
2783 {
2784         bool ret = false;
2785
2786         mutex_lock(&smu->mutex);
2787
2788         if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2789                 ret = smu->ppt_funcs->baco_is_support(smu);
2790
2791         mutex_unlock(&smu->mutex);
2792
2793         return ret;
2794 }
2795
2796 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2797 {
2798         if (!smu->ppt_funcs->baco_get_state)
2799                 return -EINVAL;
2800
2801         mutex_lock(&smu->mutex);
2802         *state = smu->ppt_funcs->baco_get_state(smu);
2803         mutex_unlock(&smu->mutex);
2804
2805         return 0;
2806 }
2807
2808 int smu_baco_enter(struct smu_context *smu)
2809 {
2810         int ret = 0;
2811
2812         mutex_lock(&smu->mutex);
2813
2814         if (smu->ppt_funcs->baco_enter)
2815                 ret = smu->ppt_funcs->baco_enter(smu);
2816
2817         mutex_unlock(&smu->mutex);
2818
2819         return ret;
2820 }
2821
2822 int smu_baco_exit(struct smu_context *smu)
2823 {
2824         int ret = 0;
2825
2826         mutex_lock(&smu->mutex);
2827
2828         if (smu->ppt_funcs->baco_exit)
2829                 ret = smu->ppt_funcs->baco_exit(smu);
2830
2831         mutex_unlock(&smu->mutex);
2832
2833         return ret;
2834 }
2835
2836 int smu_mode2_reset(struct smu_context *smu)
2837 {
2838         int ret = 0;
2839
2840         mutex_lock(&smu->mutex);
2841
2842         if (smu->ppt_funcs->mode2_reset)
2843                 ret = smu->ppt_funcs->mode2_reset(smu);
2844
2845         mutex_unlock(&smu->mutex);
2846
2847         return ret;
2848 }
2849
2850 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2851                                          struct pp_smu_nv_clock_table *max_clocks)
2852 {
2853         struct amdgpu_device *adev = smu->adev;
2854         int ret = 0;
2855
2856         if (!adev->pm.dpm_enabled)
2857                 return -EINVAL;
2858
2859         mutex_lock(&smu->mutex);
2860
2861         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2862                 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2863
2864         mutex_unlock(&smu->mutex);
2865
2866         return ret;
2867 }
2868
2869 int smu_get_uclk_dpm_states(struct smu_context *smu,
2870                             unsigned int *clock_values_in_khz,
2871                             unsigned int *num_states)
2872 {
2873         struct amdgpu_device *adev = smu->adev;
2874         int ret = 0;
2875
2876         if (!adev->pm.dpm_enabled)
2877                 return -EINVAL;
2878
2879         mutex_lock(&smu->mutex);
2880
2881         if (smu->ppt_funcs->get_uclk_dpm_states)
2882                 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2883
2884         mutex_unlock(&smu->mutex);
2885
2886         return ret;
2887 }
2888
2889 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2890 {
2891         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2892         struct amdgpu_device *adev = smu->adev;
2893
2894         if (!adev->pm.dpm_enabled)
2895                 return -EINVAL;
2896
2897         mutex_lock(&smu->mutex);
2898
2899         if (smu->ppt_funcs->get_current_power_state)
2900                 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2901
2902         mutex_unlock(&smu->mutex);
2903
2904         return pm_state;
2905 }
2906
2907 int smu_get_dpm_clock_table(struct smu_context *smu,
2908                             struct dpm_clocks *clock_table)
2909 {
2910         struct amdgpu_device *adev = smu->adev;
2911         int ret = 0;
2912
2913         if (!adev->pm.dpm_enabled)
2914                 return -EINVAL;
2915
2916         mutex_lock(&smu->mutex);
2917
2918         if (smu->ppt_funcs->get_dpm_clock_table)
2919                 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2920
2921         mutex_unlock(&smu->mutex);
2922
2923         return ret;
2924 }
2925
2926 uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
2927 {
2928         uint32_t ret = 0;
2929
2930         if (smu->ppt_funcs->get_pptable_power_limit)
2931                 ret = smu->ppt_funcs->get_pptable_power_limit(smu);
2932
2933         return ret;
2934 }