drm/amd/powerplay: use work queue to perform throttling logging
linux-2.6-block.git: drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/firmware.h>
24 #include <linux/pci.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "smu_internal.h"
29 #include "smu_v11_0.h"
30 #include "smu_v12_0.h"
31 #include "atom.h"
32 #include "arcturus_ppt.h"
33 #include "navi10_ppt.h"
34 #include "sienna_cichlid_ppt.h"
35 #include "renoir_ppt.h"
36
37 #undef __SMU_DUMMY_MAP
38 #define __SMU_DUMMY_MAP(type)   #type
39 static const char* __smu_message_names[] = {
40         SMU_MESSAGE_TYPES
41 };
42
43 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
44 {
45         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
46                 return "unknown smu message";
47         return __smu_message_names[type];
48 }
49
50 #undef __SMU_DUMMY_MAP
51 #define __SMU_DUMMY_MAP(fea)    #fea
52 static const char* __smu_feature_names[] = {
53         SMU_FEATURE_MASKS
54 };
55
56 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
57 {
58         if (feature < 0 || feature >= SMU_FEATURE_COUNT)
59                 return "unknown smu feature";
60         return __smu_feature_names[feature];
61 }
62
63 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
64 {
65         size_t size = 0;
66         int ret = 0, i = 0;
67         uint32_t feature_mask[2] = { 0 };
68         int32_t feature_index = 0;
69         uint32_t count = 0;
70         uint32_t sort_feature[SMU_FEATURE_COUNT];
71         uint64_t hw_feature_count = 0;
72
73         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
74                 return -EOPNOTSUPP;
75
76         mutex_lock(&smu->mutex);
77
78         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
79         if (ret)
80                 goto failed;
81
82         size =  sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
83                         feature_mask[1], feature_mask[0]);
84
85         for (i = 0; i < SMU_FEATURE_COUNT; i++) {
86                 feature_index = smu_feature_get_index(smu, i);
87                 if (feature_index < 0)
88                         continue;
89                 sort_feature[feature_index] = i;
90                 hw_feature_count++;
91         }
92
93         for (i = 0; i < hw_feature_count; i++) {
94                 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
95                                count++,
96                                smu_get_feature_name(smu, sort_feature[i]),
97                                i,
98                                !!smu_feature_is_enabled(smu, sort_feature[i]) ?
99                                "enabled" : "disabled");
100         }
101
102 failed:
103         mutex_unlock(&smu->mutex);
104
105         return size;
106 }
107
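/*
 * Enable or disable the SMU features selected by feature_mask: the 64-bit
 * mask is sent to the firmware as separate low/high 32-bit words, and the
 * cached feature->enabled bitmap is updated once the messages succeed.
 */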
108 static int smu_feature_update_enable_state(struct smu_context *smu,
109                                            uint64_t feature_mask,
110                                            bool enabled)
111 {
112         struct smu_feature *feature = &smu->smu_feature;
113         int ret = 0;
114
115         if (enabled) {
116                 ret = smu_send_smc_msg_with_param(smu,
117                                                   SMU_MSG_EnableSmuFeaturesLow,
118                                                   lower_32_bits(feature_mask),
119                                                   NULL);
120                 if (ret)
121                         return ret;
122                 ret = smu_send_smc_msg_with_param(smu,
123                                                   SMU_MSG_EnableSmuFeaturesHigh,
124                                                   upper_32_bits(feature_mask),
125                                                   NULL);
126                 if (ret)
127                         return ret;
128         } else {
129                 ret = smu_send_smc_msg_with_param(smu,
130                                                   SMU_MSG_DisableSmuFeaturesLow,
131                                                   lower_32_bits(feature_mask),
132                                                   NULL);
133                 if (ret)
134                         return ret;
135                 ret = smu_send_smc_msg_with_param(smu,
136                                                   SMU_MSG_DisableSmuFeaturesHigh,
137                                                   upper_32_bits(feature_mask),
138                                                   NULL);
139                 if (ret)
140                         return ret;
141         }
142
143         mutex_lock(&feature->mutex);
144         if (enabled)
145                 bitmap_or(feature->enabled, feature->enabled,
146                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
147         else
148                 bitmap_andnot(feature->enabled, feature->enabled,
149                                 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
150         mutex_unlock(&feature->mutex);
151
152         return ret;
153 }
154
155 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
156 {
157         int ret = 0;
158         uint32_t feature_mask[2] = { 0 };
159         uint64_t feature_2_enabled = 0;
160         uint64_t feature_2_disabled = 0;
161         uint64_t feature_enables = 0;
162
163         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
164                 return -EOPNOTSUPP;
165
166         mutex_lock(&smu->mutex);
167
168         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
169         if (ret)
170                 goto out;
171
172         feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
173
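        /* bits set in new_mask but currently disabled are to be enabled;
         * bits currently enabled but cleared in new_mask are to be disabled */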
174         feature_2_enabled  = ~feature_enables & new_mask;
175         feature_2_disabled = feature_enables & ~new_mask;
176
177         if (feature_2_enabled) {
178                 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
179                 if (ret)
180                         goto out;
181         }
182         if (feature_2_disabled) {
183                 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
184                 if (ret)
185                         goto out;
186         }
187
188 out:
189         mutex_unlock(&smu->mutex);
190
191         return ret;
192 }
193
194 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
195 {
196         int ret = 0;
197
198         if (!if_version && !smu_version)
199                 return -EINVAL;
200
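        /* use the cached versions if both have been queried before */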
201         if (smu->smc_fw_if_version && smu->smc_fw_version) {
203                 if (if_version)
204                         *if_version = smu->smc_fw_if_version;
205
206                 if (smu_version)
207                         *smu_version = smu->smc_fw_version;
208
209                 return 0;
210         }
211
212         if (if_version) {
213                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
214                 if (ret)
215                         return ret;
216
217                 smu->smc_fw_if_version = *if_version;
218         }
219
220         if (smu_version) {
221                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
222                 if (ret)
223                         return ret;
224
225                 smu->smc_fw_version = *smu_version;
226         }
227
228         return ret;
229 }
230
231 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
232                             uint32_t min, uint32_t max, bool lock_needed)
233 {
234         int ret = 0;
235
236         if (!smu_clk_dpm_is_enabled(smu, clk_type))
237                 return 0;
238
239         if (lock_needed)
240                 mutex_lock(&smu->mutex);
241         ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
242         if (lock_needed)
243                 mutex_unlock(&smu->mutex);
244
245         return ret;
246 }
247
248 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
249                             uint32_t min, uint32_t max)
250 {
251         int ret = 0, clk_id = 0;
252         uint32_t param;
253
254         if (min <= 0 && max <= 0)
255                 return -EINVAL;
256
257         if (!smu_clk_dpm_is_enabled(smu, clk_type))
258                 return 0;
259
260         clk_id = smu_clk_get_index(smu, clk_type);
261         if (clk_id < 0)
262                 return clk_id;
263
264         if (max > 0) {
265                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
266                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
267                                                   param, NULL);
268                 if (ret)
269                         return ret;
270         }
271
272         if (min > 0) {
273                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
274                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
275                                                   param, NULL);
276                 if (ret)
277                         return ret;
278         }
279
280
281         return ret;
282 }
283
284 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
285                            uint32_t *min, uint32_t *max, bool lock_needed)
286 {
287         uint32_t clock_limit;
288         int ret = 0;
289
290         if (!min && !max)
291                 return -EINVAL;
292
293         if (lock_needed)
294                 mutex_lock(&smu->mutex);
295
296         if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
297                 switch (clk_type) {
298                 case SMU_MCLK:
299                 case SMU_UCLK:
300                         clock_limit = smu->smu_table.boot_values.uclk;
301                         break;
302                 case SMU_GFXCLK:
303                 case SMU_SCLK:
304                         clock_limit = smu->smu_table.boot_values.gfxclk;
305                         break;
306                 case SMU_SOCCLK:
307                         clock_limit = smu->smu_table.boot_values.socclk;
308                         break;
309                 default:
310                         clock_limit = 0;
311                         break;
312                 }
313
314                 /* clock in MHz units */
315                 if (min)
316                         *min = clock_limit / 100;
317                 if (max)
318                         *max = clock_limit / 100;
319         } else {
320                 /*
321                  * Todo: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to the
322                  * core driver, and add helpers for the functionality that is common (SMU_v11_x | SMU_v12_x funcs).
323                  */
324                 ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
325         }
326
327         if (lock_needed)
328                 mutex_unlock(&smu->mutex);
329
330         return ret;
331 }
332
333 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
334                               uint16_t level, uint32_t *value)
335 {
336         int ret = 0, clk_id = 0;
337         uint32_t param;
338
339         if (!value)
340                 return -EINVAL;
341
342         if (!smu_clk_dpm_is_enabled(smu, clk_type))
343                 return 0;
344
345         clk_id = smu_clk_get_index(smu, clk_type);
346         if (clk_id < 0)
347                 return clk_id;
348
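        /* upper 16 bits carry the clock id, lower 16 bits the level index;
         * level 0xff asks the firmware for the number of levels instead */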
349         param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
350
351         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
352                                           param, value);
353         if (ret)
354                 return ret;
355
356         /* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
357          * which we do not support for now */
358         *value = *value & 0x7fffffff;
359
360         return ret;
361 }
362
363 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
364                             uint32_t *value)
365 {
366         return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
367 }
368
369 int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
370                             uint32_t *min_value, uint32_t *max_value)
371 {
372         int ret = 0;
373         uint32_t level_count = 0;
374
375         if (!min_value && !max_value)
376                 return -EINVAL;
377
378         if (min_value) {
379                 /* by default, level 0 clock value as min value */
380                 ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
381                 if (ret)
382                         return ret;
383         }
384
385         if (max_value) {
386                 ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
387                 if (ret)
388                         return ret;
389
390                 ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
391                 if (ret)
392                         return ret;
393         }
394
395         return ret;
396 }
397
398 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
399 {
400         enum smu_feature_mask feature_id = 0;
401
402         switch (clk_type) {
403         case SMU_MCLK:
404         case SMU_UCLK:
405                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
406                 break;
407         case SMU_GFXCLK:
408         case SMU_SCLK:
409                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
410                 break;
411         case SMU_SOCCLK:
412                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
413                 break;
414         default:
415                 return true;
416         }
417
418         if (!smu_feature_is_enabled(smu, feature_id)) {
419                 return false;
420         }
421
422         return true;
423 }
424
425 /**
426  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
427  *
428  * @smu:        smu_context pointer
429  * @block_type: the IP block to power gate/ungate
430  * @gate:       to power gate if true, ungate otherwise
431  *
432  * This API takes no smu->mutex lock because:
433  * 1. It is called by another IP block (gfx/sdma/vcn/uvd/vce), and the
434  *    caller guarantees the call is free of race conditions.
435  * 2. Or it is called on a user request through power_dpm_force_performance_level,
436  *    in which case smu->mutex is already held by the parent API
437  *    smu_force_performance_level in the call path.
438  */
439 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
440                            bool gate)
441 {
442         int ret = 0;
443
444         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
445                 return -EOPNOTSUPP;
446
447         switch (block_type) {
448         case AMD_IP_BLOCK_TYPE_UVD:
449                 ret = smu_dpm_set_uvd_enable(smu, !gate);
450                 break;
451         case AMD_IP_BLOCK_TYPE_VCE:
452                 ret = smu_dpm_set_vce_enable(smu, !gate);
453                 break;
454         case AMD_IP_BLOCK_TYPE_GFX:
455                 ret = smu_gfx_off_control(smu, gate);
456                 break;
457         case AMD_IP_BLOCK_TYPE_SDMA:
458                 ret = smu_powergate_sdma(smu, gate);
459                 break;
460         case AMD_IP_BLOCK_TYPE_JPEG:
461                 ret = smu_dpm_set_jpeg_enable(smu, !gate);
462                 break;
463         default:
464                 break;
465         }
466
467         return ret;
468 }
469
470 int smu_get_power_num_states(struct smu_context *smu,
471                              struct pp_states_info *state_info)
472 {
473         if (!state_info)
474                 return -EINVAL;
475
476         /* power states are not supported */
477         memset(state_info, 0, sizeof(struct pp_states_info));
478         state_info->nums = 1;
479         state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
480
481         return 0;
482 }
483
484 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
485                            void *data, uint32_t *size)
486 {
487         struct smu_power_context *smu_power = &smu->smu_power;
488         struct smu_power_gate *power_gate = &smu_power->power_gate;
489         int ret = 0;
490
491         if (!data || !size)
492                 return -EINVAL;
493
494         switch (sensor) {
495         case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
496                 *((uint32_t *)data) = smu->pstate_sclk;
497                 *size = 4;
498                 break;
499         case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
500                 *((uint32_t *)data) = smu->pstate_mclk;
501                 *size = 4;
502                 break;
503         case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
504                 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
505                 *size = 8;
506                 break;
507         case AMDGPU_PP_SENSOR_UVD_POWER:
508                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
509                 *size = 4;
510                 break;
511         case AMDGPU_PP_SENSOR_VCE_POWER:
512                 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
513                 *size = 4;
514                 break;
515         case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
516                 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
517                 *size = 4;
518                 break;
519         default:
520                 ret = -EINVAL;
521                 break;
522         }
523
524         if (ret)
525                 *size = 0;
526
527         return ret;
528 }
529
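/*
 * Transfer a single SMU table between driver memory and the SMU:
 * drv2smu == true copies table_data to the SMU, false copies it back,
 * flushing the HDP cache around the shared driver-table buffer.
 */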
530 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
531                      void *table_data, bool drv2smu)
532 {
533         struct smu_table_context *smu_table = &smu->smu_table;
534         struct amdgpu_device *adev = smu->adev;
535         struct smu_table *table = &smu_table->driver_table;
536         int table_id = smu_table_get_index(smu, table_index);
537         uint32_t table_size;
538         int ret = 0;
539         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
540                 return -EINVAL;
541
542         table_size = smu_table->tables[table_index].size;
543
544         if (drv2smu) {
545                 memcpy(table->cpu_addr, table_data, table_size);
546                 /*
547                  * Flush the HDP cache to ensure the content seen by
548                  * the GPU is consistent with what the CPU wrote.
549                  */
550                 amdgpu_asic_flush_hdp(adev, NULL);
551         }
552
553         ret = smu_send_smc_msg_with_param(smu, drv2smu ?
554                                           SMU_MSG_TransferTableDram2Smu :
555                                           SMU_MSG_TransferTableSmu2Dram,
556                                           table_id | ((argument & 0xFFFF) << 16),
557                                           NULL);
558         if (ret)
559                 return ret;
560
561         if (!drv2smu) {
562                 amdgpu_asic_flush_hdp(adev, NULL);
563                 memcpy(table_data, table->cpu_addr, table_size);
564         }
565
566         return ret;
567 }
568
569 bool is_support_sw_smu(struct amdgpu_device *adev)
570 {
571         if (adev->asic_type >= CHIP_ARCTURUS)
572                 return true;
573
574         return false;
575 }
576
577 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
578 {
579         struct smu_table_context *smu_table = &smu->smu_table;
580         uint32_t powerplay_table_size;
581
582         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
583                 return -EOPNOTSUPP;
584
585         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
586                 return -EINVAL;
587
588         mutex_lock(&smu->mutex);
589
590         if (smu_table->hardcode_pptable)
591                 *table = smu_table->hardcode_pptable;
592         else
593                 *table = smu_table->power_play_table;
594
595         powerplay_table_size = smu_table->power_play_table_size;
596
597         mutex_unlock(&smu->mutex);
598
599         return powerplay_table_size;
600 }
601
602 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
603 {
604         struct smu_table_context *smu_table = &smu->smu_table;
605         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
606         int ret = 0;
607
608         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
609                 return -EOPNOTSUPP;
610
611         if (header->usStructureSize != size) {
612                 pr_err("pp table size mismatch!\n");
613                 return -EIO;
614         }
615
616         mutex_lock(&smu->mutex);
617         if (!smu_table->hardcode_pptable)
618                 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
619         if (!smu_table->hardcode_pptable) {
620                 ret = -ENOMEM;
621                 goto failed;
622         }
623
624         memcpy(smu_table->hardcode_pptable, buf, size);
625         smu_table->power_play_table = smu_table->hardcode_pptable;
626         smu_table->power_play_table_size = size;
627
628         /*
629          * A special hw_fini action (on Navi1x, the DPM disablement is
630          * skipped) may be needed for custom pptable uploading.
631          */
632         smu->uploading_custom_pp_table = true;
633
634         ret = smu_reset(smu);
635         if (ret)
636                 pr_info("smu reset failed, ret = %d\n", ret);
637
638         smu->uploading_custom_pp_table = false;
639
640 failed:
641         mutex_unlock(&smu->mutex);
642         return ret;
643 }
644
645 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
646 {
647         struct smu_feature *feature = &smu->smu_feature;
648         int ret = 0;
649         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
650
651         mutex_lock(&feature->mutex);
652         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
653         mutex_unlock(&feature->mutex);
654
655         ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
656                                              SMU_FEATURE_MAX/32);
657         if (ret)
658                 return ret;
659
660         mutex_lock(&feature->mutex);
661         bitmap_or(feature->allowed, feature->allowed,
662                       (unsigned long *)allowed_feature_mask,
663                       feature->feature_num);
664         mutex_unlock(&feature->mutex);
665
666         return ret;
667 }
668
669 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
670 {
671         struct smu_feature *feature = &smu->smu_feature;
672         int feature_id;
673         int ret = 0;
674
675         if (smu->is_apu)
676                 return 1;
677         feature_id = smu_feature_get_index(smu, mask);
678         if (feature_id < 0)
679                 return 0;
680
681         WARN_ON(feature_id > feature->feature_num);
682
683         mutex_lock(&feature->mutex);
684         ret = test_bit(feature_id, feature->enabled);
685         mutex_unlock(&feature->mutex);
686
687         return ret;
688 }
689
690 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
691                             bool enable)
692 {
693         struct smu_feature *feature = &smu->smu_feature;
694         int feature_id;
695
696         feature_id = smu_feature_get_index(smu, mask);
697         if (feature_id < 0)
698                 return -EINVAL;
699
700         WARN_ON(feature_id > feature->feature_num);
701
702         return smu_feature_update_enable_state(smu,
703                                                1ULL << feature_id,
704                                                enable);
705 }
706
707 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
708 {
709         struct smu_feature *feature = &smu->smu_feature;
710         int feature_id;
711         int ret = 0;
712
713         feature_id = smu_feature_get_index(smu, mask);
714         if (feature_id < 0)
715                 return 0;
716
717         WARN_ON(feature_id > feature->feature_num);
718
719         mutex_lock(&feature->mutex);
720         ret = test_bit(feature_id, feature->supported);
721         mutex_unlock(&feature->mutex);
722
723         return ret;
724 }
725
726 static int smu_set_funcs(struct amdgpu_device *adev)
727 {
728         struct smu_context *smu = &adev->smu;
729
730         if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
731                 smu->od_enabled = true;
732
733         switch (adev->asic_type) {
734         case CHIP_NAVI10:
735         case CHIP_NAVI14:
736         case CHIP_NAVI12:
737                 navi10_set_ppt_funcs(smu);
738                 break;
739         case CHIP_ARCTURUS:
740                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
741                 arcturus_set_ppt_funcs(smu);
742                 /* OD is not supported on Arcturus */
743                 smu->od_enabled = false;
744                 break;
745         case CHIP_SIENNA_CICHLID:
746                 sienna_cichlid_set_ppt_funcs(smu);
747                 break;
748         case CHIP_RENOIR:
749                 renoir_set_ppt_funcs(smu);
750                 break;
751         default:
752                 return -EINVAL;
753         }
754
755         return 0;
756 }
757
758 static int smu_early_init(void *handle)
759 {
760         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
761         struct smu_context *smu = &adev->smu;
762
763         smu->adev = adev;
764         smu->pm_enabled = !!amdgpu_dpm;
765         smu->is_apu = false;
766         mutex_init(&smu->mutex);
767
768         return smu_set_funcs(adev);
769 }
770
771 static int smu_late_init(void *handle)
772 {
773         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
774         struct smu_context *smu = &adev->smu;
775         int ret = 0;
776
777         if (!smu->pm_enabled)
778                 return 0;
779
780         ret = smu_set_default_od_settings(smu);
781         if (ret)
782                 return ret;
783
784         /*
785          * Set the initial values (read from the vbios) into the dpm tables
786          * context, such as gfxclk, memclk, dcefclk, etc., and enable the DPM
787          * feature for each clock type.
788          */
789         ret = smu_populate_smc_tables(smu);
790         if (ret)
791                 return ret;
792
793         ret = smu_init_max_sustainable_clocks(smu);
794         if (ret)
795                 return ret;
796
797         ret = smu_populate_umd_state_clk(smu);
798         if (ret)
799                 return ret;
800
801         ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
802         if (ret)
803                 return ret;
804
805         smu_get_unique_id(smu);
806
807         smu_handle_task(&adev->smu,
808                         smu->smu_dpm.dpm_level,
809                         AMD_PP_TASK_COMPLETE_INIT,
810                         false);
811
812         return 0;
813 }
814
815 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
816                             uint16_t *size, uint8_t *frev, uint8_t *crev,
817                             uint8_t **addr)
818 {
819         struct amdgpu_device *adev = smu->adev;
820         uint16_t data_start;
821
822         if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
823                                            size, frev, crev, &data_start))
824                 return -EINVAL;
825
826         *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
827
828         return 0;
829 }
830
831 static int smu_init_fb_allocations(struct smu_context *smu)
832 {
833         struct amdgpu_device *adev = smu->adev;
834         struct smu_table_context *smu_table = &smu->smu_table;
835         struct smu_table *tables = smu_table->tables;
836         struct smu_table *driver_table = &(smu_table->driver_table);
837         uint32_t max_table_size = 0;
838         int ret, i;
839
840         /* VRAM allocation for tool table */
841         if (tables[SMU_TABLE_PMSTATUSLOG].size) {
842                 ret = amdgpu_bo_create_kernel(adev,
843                                               tables[SMU_TABLE_PMSTATUSLOG].size,
844                                               tables[SMU_TABLE_PMSTATUSLOG].align,
845                                               tables[SMU_TABLE_PMSTATUSLOG].domain,
846                                               &tables[SMU_TABLE_PMSTATUSLOG].bo,
847                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
848                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
849                 if (ret) {
850                         pr_err("VRAM allocation for tool table failed!\n");
851                         return ret;
852                 }
853         }
854
855         /* VRAM allocation for driver table */
856         for (i = 0; i < SMU_TABLE_COUNT; i++) {
857                 if (tables[i].size == 0)
858                         continue;
859
860                 if (i == SMU_TABLE_PMSTATUSLOG)
861                         continue;
862
863                 if (max_table_size < tables[i].size)
864                         max_table_size = tables[i].size;
865         }
866
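        /* a single driver table bo, sized for the largest SMU table, is shared
         * by all driver <-> SMU table transfers */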
867         driver_table->size = max_table_size;
868         driver_table->align = PAGE_SIZE;
869         driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
870
871         ret = amdgpu_bo_create_kernel(adev,
872                                       driver_table->size,
873                                       driver_table->align,
874                                       driver_table->domain,
875                                       &driver_table->bo,
876                                       &driver_table->mc_address,
877                                       &driver_table->cpu_addr);
878         if (ret) {
879                 pr_err("VRAM allocation for driver table failed!\n");
880                 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
881                         amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
882                                               &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
883                                               &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
884         }
885
886         return ret;
887 }
888
889 static int smu_fini_fb_allocations(struct smu_context *smu)
890 {
891         struct smu_table_context *smu_table = &smu->smu_table;
892         struct smu_table *tables = smu_table->tables;
893         struct smu_table *driver_table = &(smu_table->driver_table);
894
895         if (!tables)
896                 return 0;
897
898         if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
899                 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
900                                       &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
901                                       &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
902
903         amdgpu_bo_free_kernel(&driver_table->bo,
904                               &driver_table->mc_address,
905                               &driver_table->cpu_addr);
906
907         return 0;
908 }
909
910 /**
911  * smu_alloc_memory_pool - allocate memory pool in the system memory
912  *
913  * @smu: smu_context pointer
914  *
915  * This memory pool is for SMC use; its location is reported to the firmware
916  * via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
917  *
918  * Returns 0 on success, error on failure.
919  */
920 static int smu_alloc_memory_pool(struct smu_context *smu)
921 {
922         struct amdgpu_device *adev = smu->adev;
923         struct smu_table_context *smu_table = &smu->smu_table;
924         struct smu_table *memory_pool = &smu_table->memory_pool;
925         uint64_t pool_size = smu->pool_size;
926         int ret = 0;
927
928         if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
929                 return ret;
930
931         memory_pool->size = pool_size;
932         memory_pool->align = PAGE_SIZE;
933         memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
934
935         switch (pool_size) {
936         case SMU_MEMORY_POOL_SIZE_256_MB:
937         case SMU_MEMORY_POOL_SIZE_512_MB:
938         case SMU_MEMORY_POOL_SIZE_1_GB:
939         case SMU_MEMORY_POOL_SIZE_2_GB:
940                 ret = amdgpu_bo_create_kernel(adev,
941                                               memory_pool->size,
942                                               memory_pool->align,
943                                               memory_pool->domain,
944                                               &memory_pool->bo,
945                                               &memory_pool->mc_address,
946                                               &memory_pool->cpu_addr);
947                 break;
948         default:
949                 break;
950         }
951
952         return ret;
953 }
954
955 static int smu_free_memory_pool(struct smu_context *smu)
956 {
957         struct smu_table_context *smu_table = &smu->smu_table;
958         struct smu_table *memory_pool = &smu_table->memory_pool;
959
960         if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
961                 return 0;
962
963         amdgpu_bo_free_kernel(&memory_pool->bo,
964                               &memory_pool->mc_address,
965                               &memory_pool->cpu_addr);
966
967         memset(memory_pool, 0, sizeof(struct smu_table));
968
969         return 0;
970 }
971
972 static int smu_smc_table_sw_init(struct smu_context *smu)
973 {
974         int ret;
975
976         /*
977          * Create the smu_table structure, and init smc tables such as
978          * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
979          */
980         ret = smu_init_smc_tables(smu);
981         if (ret) {
982                 pr_err("Failed to init smc tables!\n");
983                 return ret;
984         }
985
986         /*
987          * Create the smu_power_context structure, and allocate the smu_dpm_context
988          * and other context data needed to fill the smu_power_context.
989          */
990         ret = smu_init_power(smu);
991         if (ret) {
992                 pr_err("smu_init_power failed!\n");
993                 return ret;
994         }
995
996         /*
997          * allocate vram bos to store smc table contents.
998          */
999         ret = smu_init_fb_allocations(smu);
1000         if (ret)
1001                 return ret;
1002
1003         ret = smu_alloc_memory_pool(smu);
1004         if (ret)
1005                 return ret;
1006
1007         return 0;
1008 }
1009
1010 static int smu_smc_table_sw_fini(struct smu_context *smu)
1011 {
1012         int ret;
1013
1014         ret = smu_free_memory_pool(smu);
1015         if (ret)
1016                 return ret;
1017
1018         ret = smu_fini_fb_allocations(smu);
1019         if (ret)
1020                 return ret;
1021
1022         ret = smu_fini_power(smu);
1023         if (ret) {
1024                 pr_err("smu_fini_power failed!\n");
1025                 return ret;
1026         }
1027
1028         ret = smu_fini_smc_tables(smu);
1029         if (ret) {
1030                 pr_err("smu_fini_smc_tables failed!\n");
1031                 return ret;
1032         }
1033
1034         return 0;
1035 }
1036
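/*
 * Throttling logging is performed through this work item so that
 * smu_log_thermal_throttling() runs in process context, where sleeping and
 * taking locks are allowed; the work is expected to be queued from the point
 * where a throttling event is detected (typically an interrupt handler).
 */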
1037 static void smu_throttling_logging_work_fn(struct work_struct *work)
1038 {
1039         struct smu_context *smu = container_of(work, struct smu_context,
1040                                                throttling_logging_work);
1041
1042         smu_log_thermal_throttling(smu);
1043 }
1044
1045 static int smu_sw_init(void *handle)
1046 {
1047         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1048         struct smu_context *smu = &adev->smu;
1049         int ret;
1050
1051         smu->pool_size = adev->pm.smu_prv_buffer_size;
1052         smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1053         mutex_init(&smu->smu_feature.mutex);
1054         bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1055         bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
1056         bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1057
1058         mutex_init(&smu->smu_baco.mutex);
1059         smu->smu_baco.state = SMU_BACO_STATE_EXIT;
1060         smu->smu_baco.platform_support = false;
1061
1062         mutex_init(&smu->sensor_lock);
1063         mutex_init(&smu->metrics_lock);
1064         mutex_init(&smu->message_lock);
1065
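        /* work item used to defer throttling logging out of the event path */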
1066         INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
1067         smu->watermarks_bitmap = 0;
1068         smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1069         smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1070
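        /* workload_prority maps a power profile to its priority slot;
         * workload_setting is the reverse mapping from slot to profile */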
1071         smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
1072         smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
1073         smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
1074         smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
1075         smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
1076         smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
1077         smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
1078         smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
1079
1080         smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1081         smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1082         smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
1083         smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
1084         smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
1085         smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
1086         smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
1087         smu->display_config = &adev->pm.pm_display_cfg;
1088
1089         smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1090         smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1091         ret = smu_init_microcode(smu);
1092         if (ret) {
1093                 pr_err("Failed to load smu firmware!\n");
1094                 return ret;
1095         }
1096
1097         ret = smu_smc_table_sw_init(smu);
1098         if (ret) {
1099                 pr_err("Failed to sw init smc table!\n");
1100                 return ret;
1101         }
1102
1103         ret = smu_register_irq_handler(smu);
1104         if (ret) {
1105                 pr_err("Failed to register smc irq handler!\n");
1106                 return ret;
1107         }
1108
1109         return 0;
1110 }
1111
1112 static int smu_sw_fini(void *handle)
1113 {
1114         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1115         struct smu_context *smu = &adev->smu;
1116         int ret;
1117
1118         ret = smu_smc_table_sw_fini(smu);
1119         if (ret) {
1120                 pr_err("Failed to sw fini smc table!\n");
1121                 return ret;
1122         }
1123
1124         smu_fini_microcode(smu);
1125
1126         return 0;
1127 }
1128
1129 static int smu_smc_hw_setup(struct smu_context *smu)
1130 {
1131         struct amdgpu_device *adev = smu->adev;
1132         int ret;
1133
1134         if (smu_is_dpm_running(smu) && adev->in_suspend) {
1135                 pr_info("dpm has been enabled\n");
1136                 return 0;
1137         }
1138
1139         ret = smu_init_display_count(smu, 0);
1140         if (ret)
1141                 return ret;
1142
1143         ret = smu_set_driver_table_location(smu);
1144         if (ret)
1145                 return ret;
1146
1147         /*
1148          * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1149          */
1150         ret = smu_set_tool_table_location(smu);
1151         if (ret)
1152                 return ret;
1153
1154         /*
1155          * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
1156          * notify the firmware of the memory pool location.
1157          */
1158         ret = smu_notify_memory_pool_location(smu);
1159         if (ret)
1160                 return ret;
1161
1162         /* smu_dump_pptable(smu); */
1163         /*
1164          * Copy the pptable bo in vram to the SMC with SMU messages such as
1165          * SetDriverDramAddr and TransferTableDram2Smu.
1166          */
1167         ret = smu_write_pptable(smu);
1168         if (ret)
1169                 return ret;
1170
1171         /* issue Run*Btc msg */
1172         ret = smu_run_btc(smu);
1173         if (ret)
1174                 return ret;
1175
1176         ret = smu_feature_set_allowed_mask(smu);
1177         if (ret)
1178                 return ret;
1179
1180         ret = smu_system_features_control(smu, true);
1181         if (ret)
1182                 return ret;
1183
1184         if (!smu_is_dpm_running(smu))
1185                 pr_info("dpm has been disabled\n");
1186
1187         ret = smu_override_pcie_parameters(smu);
1188         if (ret)
1189                 return ret;
1190
1191         ret = smu_enable_thermal_alert(smu);
1192         if (ret)
1193                 return ret;
1194
1195         ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
1196         if (ret)
1197                 return ret;
1198
1199         ret = smu_disable_umc_cdr_12gbps_workaround(smu);
1200         if (ret) {
1201                 pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
1202                 return ret;
1203         }
1204
1205         /*
1206          * For Navi1X, manually switch it to AC mode as PMFW
1207          * may boot it with DC mode.
1208          */
1209         ret = smu_set_power_source(smu,
1210                                    adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
1211                                    SMU_POWER_SOURCE_DC);
1212         if (ret) {
1213                 pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
1214                 return ret;
1215         }
1216
1217         ret = smu_notify_display_change(smu);
1218         if (ret)
1219                 return ret;
1220
1221         /*
1222          * Set the minimum deep sleep dcefclk to the bootup value from the vbios via
1223          * SetMinDeepSleepDcefclk MSG.
1224          */
1225         ret = smu_set_min_dcef_deep_sleep(smu);
1226         if (ret)
1227                 return ret;
1228
1229         return ret;
1230 }
1231
1232 static int smu_start_smc_engine(struct smu_context *smu)
1233 {
1234         struct amdgpu_device *adev = smu->adev;
1235         int ret = 0;
1236
1237         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1238                 if (adev->asic_type < CHIP_NAVI10) {
1239                         if (smu->ppt_funcs->load_microcode) {
1240                                 ret = smu->ppt_funcs->load_microcode(smu);
1241                                 if (ret)
1242                                         return ret;
1243                         }
1244                 }
1245         }
1246
1247         if (smu->ppt_funcs->check_fw_status) {
1248                 ret = smu->ppt_funcs->check_fw_status(smu);
1249                 if (ret) {
1250                         pr_err("SMC is not ready\n");
1251                         return ret;
1252                 }
1253         }
1254
1255         /*
1256          * Send the GetDriverIfVersion message to check whether the returned value
1257          * matches the DRIVER_IF_VERSION in the smc header.
1258          */
1259         ret = smu_check_fw_version(smu);
1260         if (ret)
1261                 return ret;
1262
1263         return ret;
1264 }
1265
1266 static int smu_hw_init(void *handle)
1267 {
1268         int ret;
1269         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1270         struct smu_context *smu = &adev->smu;
1271
1272         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1273                 return 0;
1274
1275         ret = smu_start_smc_engine(smu);
1276         if (ret) {
1277                 pr_err("SMU is not ready yet!\n");
1278                 return ret;
1279         }
1280
1281         if (smu->is_apu) {
1282                 smu_powergate_sdma(&adev->smu, false);
1283                 smu_powergate_vcn(&adev->smu, false);
1284                 smu_powergate_jpeg(&adev->smu, false);
1285                 smu_set_gfx_cgpg(&adev->smu, true);
1286         }
1287
1288         if (!smu->pm_enabled)
1289                 return 0;
1290
1291         /* get boot_values from the vbios to set revision, gfxclk, etc. */
1292         ret = smu_get_vbios_bootup_values(smu);
1293         if (ret)
1294                 return ret;
1295
1296         ret = smu_setup_pptable(smu);
1297         if (ret)
1298                 return ret;
1299
1300         ret = smu_get_driver_allowed_feature_mask(smu);
1301         if (ret)
1302                 goto failed;
1303
1304         ret = smu_smc_hw_setup(smu);
1305         if (ret)
1306                 goto failed;
1307
1308         adev->pm.dpm_enabled = true;
1309
1310         pr_info("SMU is initialized successfully!\n");
1311
1312         return 0;
1313
1314 failed:
1315         return ret;
1316 }
1317
1318 static int smu_disable_dpms(struct smu_context *smu)
1319 {
1320         struct amdgpu_device *adev = smu->adev;
1321         uint64_t features_to_disable;
1322         int ret = 0;
1323         bool use_baco = !smu->is_apu &&
1324                 ((adev->in_gpu_reset &&
1325                   (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1326                  ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
1327
1328         /*
1329          * For custom pptable uploading, skip the DPM features
1330          * disable process on Navi1x ASICs.
1331          *   - The gfx related features are under control of the
1332          *     RLC on those ASICs. RLC reinitialization would be
1333          *     needed to reenable them, which would cost much more
1334          *     effort.
1335          *
1336          *   - SMU firmware can handle the DPM reenablement
1337          *     properly.
1338          */
1339         if (smu->uploading_custom_pp_table &&
1340             (adev->asic_type >= CHIP_NAVI10) &&
1341             (adev->asic_type <= CHIP_NAVI12))
1342                 return 0;
1343
1344         /*
1345          * For Sienna_Cichlid, the PMFW handles the feature disablement properly
1346          * on BACO entry. Driver involvement is unnecessary.
1347          */
1348         if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
1349              use_baco)
1350                 return 0;
1351
1352         /*
1353          * For gpu reset, runpm and hibernation through BACO,
1354          * BACO feature has to be kept enabled.
1355          */
1356         if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1357                 features_to_disable = U64_MAX &
1358                         ~(1ULL << smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT));
1359                 ret = smu_feature_update_enable_state(smu,
1360                                                       features_to_disable,
1361                                                       0);
1362                 if (ret)
1363                         pr_err("Failed to disable smu features except BACO.\n");
1364         } else {
1365                 ret = smu_system_features_control(smu, false);
1366                 if (ret)
1367                         pr_err("Failed to disable smu features.\n");
1368         }
1369
1370         if (adev->asic_type >= CHIP_NAVI10 &&
1371             adev->gfx.rlc.funcs->stop)
1372                 adev->gfx.rlc.funcs->stop(adev);
1373
1374         return ret;
1375 }
1376
1377 static int smu_smc_hw_cleanup(struct smu_context *smu)
1378 {
1379         struct amdgpu_device *adev = smu->adev;
1380         int ret = 0;
1381
1382         smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
1383
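        /* make sure no throttling-logging work is pending or still running before teardown */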
1384         cancel_work_sync(&smu->throttling_logging_work);
1385
1386         ret = smu_disable_thermal_alert(smu);
1387         if (ret) {
1388                 pr_warn("Failed to stop thermal control!\n");
1389                 return ret;
1390         }
1391
1392         ret = smu_disable_dpms(smu);
1393         if (ret)
1394                 return ret;
1395
1396         return 0;
1397 }
1398
1399 static int smu_hw_fini(void *handle)
1400 {
1401         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1402         struct smu_context *smu = &adev->smu;
1403         int ret = 0;
1404
1405         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1406                 return 0;
1407
1408         if (smu->is_apu) {
1409                 smu_powergate_sdma(&adev->smu, true);
1410                 smu_powergate_vcn(&adev->smu, true);
1411                 smu_powergate_jpeg(&adev->smu, true);
1412         }
1413
1414         if (!smu->pm_enabled)
1415                 return 0;
1416
1417         adev->pm.dpm_enabled = false;
1418
1419         ret = smu_smc_hw_cleanup(smu);
1420         if (ret)
1421                 return ret;
1422
1423         return 0;
1424 }
1425
1426 int smu_reset(struct smu_context *smu)
1427 {
1428         struct amdgpu_device *adev = smu->adev;
1429         int ret = 0;
1430
1431         ret = smu_hw_fini(adev);
1432         if (ret)
1433                 return ret;
1434
1435         ret = smu_hw_init(adev);
1436         if (ret)
1437                 return ret;
1438
1439         ret = smu_late_init(adev);
1440
1441         return ret;
1442 }
1443
1444 static int smu_suspend(void *handle)
1445 {
1446         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1447         struct smu_context *smu = &adev->smu;
1448         int ret;
1449
1450         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1451                 return 0;
1452
1453         if (!smu->pm_enabled)
1454                 return 0;
1455
1456         adev->pm.dpm_enabled = false;
1457
1458         ret = smu_smc_hw_cleanup(smu);
1459         if (ret)
1460                 return ret;
1461
1462         smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1463
1464         if (smu->is_apu)
1465                 smu_set_gfx_cgpg(&adev->smu, false);
1466
1467         return 0;
1468 }
1469
1470 static int smu_resume(void *handle)
1471 {
1472         int ret;
1473         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1474         struct smu_context *smu = &adev->smu;
1475
1476         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1477                 return 0;
1478
1479         if (!smu->pm_enabled)
1480                 return 0;
1481
1482         pr_info("SMU is resuming...\n");
1483
1484         ret = smu_start_smc_engine(smu);
1485         if (ret) {
1486                 pr_err("SMU is not ready yet!\n");
1487                 goto failed;
1488         }
1489
1490         ret = smu_smc_hw_setup(smu);
1491         if (ret)
1492                 goto failed;
1493
1494         if (smu->is_apu)
1495                 smu_set_gfx_cgpg(&adev->smu, true);
1496
1497         smu->disable_uclk_switch = 0;
1498
1499         adev->pm.dpm_enabled = true;
1500
1501         pr_info("SMU is resumed successfully!\n");
1502
1503         return 0;
1504
1505 failed:
1506         return ret;
1507 }
1508
1509 int smu_display_configuration_change(struct smu_context *smu,
1510                                      const struct amd_pp_display_configuration *display_config)
1511 {
1512         int index = 0;
1513         int num_of_active_display = 0;
1514
1515         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1516                 return -EOPNOTSUPP;
1517
1518         if (!display_config)
1519                 return -EINVAL;
1520
1521         mutex_lock(&smu->mutex);
1522
1523         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
1524                 smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
1525                                 display_config->min_dcef_deep_sleep_set_clk / 100);
1526
1527         for (index = 0; index < display_config->num_path_including_non_display; index++) {
1528                 if (display_config->displays[index].controller_id != 0)
1529                         num_of_active_display++;
1530         }
1531
1532         smu_set_active_display_count(smu, num_of_active_display);
1533
1534         smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1535                            display_config->cpu_cc6_disable,
1536                            display_config->cpu_pstate_disable,
1537                            display_config->nb_pstate_switch_disable);
1538
1539         mutex_unlock(&smu->mutex);
1540
1541         return 0;
1542 }
1543
1544 static int smu_get_clock_info(struct smu_context *smu,
1545                               struct smu_clock_info *clk_info,
1546                               enum smu_perf_level_designation designation)
1547 {
1548         int ret;
1549         struct smu_performance_level level = {0};
1550
1551         if (!clk_info)
1552                 return -EINVAL;
1553
1554         ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1555         if (ret)
1556                 return -EINVAL;
1557
1558         clk_info->min_mem_clk = level.memory_clock;
1559         clk_info->min_eng_clk = level.core_clock;
1560         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1561
1562         ret = smu_get_perf_level(smu, designation, &level);
1563         if (ret)
1564                 return -EINVAL;
1565
1566         clk_info->min_mem_clk = level.memory_clock;
1567         clk_info->min_eng_clk = level.core_clock;
1568         clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1569
1570         return 0;
1571 }
1572
1573 int smu_get_current_clocks(struct smu_context *smu,
1574                            struct amd_pp_clock_info *clocks)
1575 {
1576         struct amd_pp_simple_clock_info simple_clocks = {0};
1577         struct smu_clock_info hw_clocks;
1578         int ret = 0;
1579
1580         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1581                 return -EOPNOTSUPP;
1582
1583         mutex_lock(&smu->mutex);
1584
1585         smu_get_dal_power_level(smu, &simple_clocks);
1586
1587         if (smu->support_power_containment)
1588                 ret = smu_get_clock_info(smu, &hw_clocks,
1589                                          PERF_LEVEL_POWER_CONTAINMENT);
1590         else
1591                 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1592
1593         if (ret) {
1594                 pr_err("Error in smu_get_clock_info\n");
1595                 goto failed;
1596         }
1597
1598         clocks->min_engine_clock = hw_clocks.min_eng_clk;
1599         clocks->max_engine_clock = hw_clocks.max_eng_clk;
1600         clocks->min_memory_clock = hw_clocks.min_mem_clk;
1601         clocks->max_memory_clock = hw_clocks.max_mem_clk;
1602         clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1603         clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1604         clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1605         clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1606
1607         if (simple_clocks.level == 0)
1608                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1609         else
1610                 clocks->max_clocks_state = simple_clocks.level;
1611
1612         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1613                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1614                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1615         }
1616
1617 failed:
1618         mutex_unlock(&smu->mutex);
1619         return ret;
1620 }
1621
1622 static int smu_set_clockgating_state(void *handle,
1623                                      enum amd_clockgating_state state)
1624 {
1625         return 0;
1626 }
1627
1628 static int smu_set_powergating_state(void *handle,
1629                                      enum amd_powergating_state state)
1630 {
1631         return 0;
1632 }
1633
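/*
 * Entering a profiling (UMD pstate) level saves the current DPM level and
 * ungates GFX power/clock gating so clocks can be forced reliably; leaving
 * it (via PROFILE_EXIT) restores the saved level and re-enables GFX gating.
 */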
1634 static int smu_enable_umd_pstate(void *handle,
1635                       enum amd_dpm_forced_level *level)
1636 {
1637         uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1638                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1639                                         AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1640                                         AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1641
1642         struct smu_context *smu = (struct smu_context*)(handle);
1643         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1644
1645         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1646                 return -EINVAL;
1647
1648         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1649                 /* enter umd pstate, save current level, disable gfx cg */
1650                 if (*level & profile_mode_mask) {
1651                         smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1652                         smu_dpm_ctx->enable_umd_pstate = true;
1653                         amdgpu_device_ip_set_powergating_state(smu->adev,
1654                                                                AMD_IP_BLOCK_TYPE_GFX,
1655                                                                AMD_PG_STATE_UNGATE);
1656                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1657                                                                AMD_IP_BLOCK_TYPE_GFX,
1658                                                                AMD_CG_STATE_UNGATE);
1659                 }
1660         } else {
1661                 /* exit umd pstate, restore level, enable gfx cg */
1662                 if (!(*level & profile_mode_mask)) {
1663                         if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1664                                 *level = smu_dpm_ctx->saved_dpm_level;
1665                         smu_dpm_ctx->enable_umd_pstate = false;
1666                         amdgpu_device_ip_set_clockgating_state(smu->adev,
1667                                                                AMD_IP_BLOCK_TYPE_GFX,
1668                                                                AMD_CG_STATE_GATE);
1669                         amdgpu_device_ip_set_powergating_state(smu->adev,
1670                                                                AMD_IP_BLOCK_TYPE_GFX,
1671                                                                AMD_PG_STATE_GATE);
1672                 }
1673         }
1674
1675         return 0;
1676 }
1677
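/*
 * Re-evaluate the power state: unless skip_display_settings is set,
 * propagate the display config change and notify the SMC of it; always
 * apply the clock adjustment rules; then switch to the requested
 * performance level and, when not in manual mode, re-apply the
 * highest-priority enabled workload profile.
 */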
1678 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1679                                    enum amd_dpm_forced_level level,
1680                                    bool skip_display_settings)
1681 {
1682         int ret = 0;
1683         int index = 0;
1684         long workload;
1685         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1686
1687         if (!skip_display_settings) {
1688                 ret = smu_display_config_changed(smu);
1689                 if (ret) {
1690                         pr_err("Failed to change display config!");
1691                         return ret;
1692                 }
1693         }
1694
1695         ret = smu_apply_clocks_adjust_rules(smu);
1696         if (ret) {
1697                 pr_err("Failed to apply clocks adjust rules!");
1698                 return ret;
1699         }
1700
1701         if (!skip_display_settings) {
1702                 ret = smu_notify_smc_display_config(smu);
1703                 if (ret) {
1704                         pr_err("Failed to notify smc display config!");
1705                         return ret;
1706                 }
1707         }
1708
1709         if (smu_dpm_ctx->dpm_level != level) {
1710                 ret = smu_asic_set_performance_level(smu, level);
1711                 if (ret) {
1712                         pr_err("Failed to set performance level!");
1713                         return ret;
1714                 }
1715
1716                 /* update the saved copy */
1717                 smu_dpm_ctx->dpm_level = level;
1718         }
1719
1720         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1721                 index = fls(smu->workload_mask);
1722                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1723                 workload = smu->workload_setting[index];
1724
1725                 if (smu->power_profile_mode != workload)
1726                         smu_set_power_profile_mode(smu, &workload, 0, false);
1727         }
1728
1729         return ret;
1730 }
1731
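/*
 * Dispatch a power management task (display config change, init
 * completion, power state readjustment) and re-evaluate the power state.
 * With lock_needed false the caller must already hold smu->mutex.
 */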
1732 int smu_handle_task(struct smu_context *smu,
1733                     enum amd_dpm_forced_level level,
1734                     enum amd_pp_task task_id,
1735                     bool lock_needed)
1736 {
1737         int ret = 0;
1738
1739         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1740                 return -EOPNOTSUPP;
1741
1742         if (lock_needed)
1743                 mutex_lock(&smu->mutex);
1744
1745         switch (task_id) {
1746         case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1747                 ret = smu_pre_display_config_changed(smu);
1748                 if (ret)
1749                         goto out;
1750                 ret = smu_set_cpu_power_state(smu);
1751                 if (ret)
1752                         goto out;
1753                 ret = smu_adjust_power_state_dynamic(smu, level, false);
1754                 break;
1755         case AMD_PP_TASK_COMPLETE_INIT:
1756         case AMD_PP_TASK_READJUST_POWER_STATE:
1757                 ret = smu_adjust_power_state_dynamic(smu, level, true);
1758                 break;
1759         default:
1760                 break;
1761         }
1762
1763 out:
1764         if (lock_needed)
1765                 mutex_unlock(&smu->mutex);
1766
1767         return ret;
1768 }
1769
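/*
 * Enable or disable a power profile hint. The per-profile priorities in
 * workload_prority decide which of the currently enabled profiles (tracked
 * in workload_mask) is applied, and only when the DPM level is not manual.
 * As a typical (caller-side) example, the compute/KFD path may toggle
 * PP_SMC_POWER_PROFILE_COMPUTE around compute work.
 */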
1770 int smu_switch_power_profile(struct smu_context *smu,
1771                              enum PP_SMC_POWER_PROFILE type,
1772                              bool en)
1773 {
1774         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1775         long workload;
1776         uint32_t index;
1777
1778         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1779                 return -EOPNOTSUPP;
1780
1781         if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
1782                 return -EINVAL;
1783
1784         mutex_lock(&smu->mutex);
1785
1786         if (!en) {
1787                 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1788                 index = fls(smu->workload_mask);
1789                 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1790                 workload = smu->workload_setting[index];
1791         } else {
1792                 smu->workload_mask |= (1 << smu->workload_prority[type]);
1793                 index = fls(smu->workload_mask);
1794                 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1795                 workload = smu->workload_setting[index];
1796         }
1797
1798         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1799                 smu_set_power_profile_mode(smu, &workload, 0, false);
1800
1801         mutex_unlock(&smu->mutex);
1802
1803         return 0;
1804 }
1805
1806 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1807 {
1808         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1809         enum amd_dpm_forced_level level;
1810
1811         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1812                 return -EOPNOTSUPP;
1813
1814         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1815                 return -EINVAL;
1816
1817         mutex_lock(&(smu->mutex));
1818         level = smu_dpm_ctx->dpm_level;
1819         mutex_unlock(&(smu->mutex));
1820
1821         return level;
1822 }
1823
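/*
 * Force a DPM level, typically reached via the
 * power_dpm_force_performance_level sysfs interface. The UMD pstate
 * transition is handled first, then the power state is readjusted with
 * smu->mutex held (hence lock_needed = false for smu_handle_task()).
 */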
1824 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1825 {
1826         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1827         int ret = 0;
1828
1829         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1830                 return -EOPNOTSUPP;
1831
1832         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1833                 return -EINVAL;
1834
1835         mutex_lock(&smu->mutex);
1836
1837         ret = smu_enable_umd_pstate(smu, &level);
1838         if (ret) {
1839                 mutex_unlock(&smu->mutex);
1840                 return ret;
1841         }
1842
1843         ret = smu_handle_task(smu, level,
1844                               AMD_PP_TASK_READJUST_POWER_STATE,
1845                               false);
1846
1847         mutex_unlock(&smu->mutex);
1848
1849         return ret;
1850 }
1851
1852 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1853 {
1854         int ret = 0;
1855
1856         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1857                 return -EOPNOTSUPP;
1858
1859         mutex_lock(&smu->mutex);
1860         ret = smu_init_display_count(smu, count);
1861         mutex_unlock(&smu->mutex);
1862
1863         return ret;
1864 }
1865
1866 int smu_force_clk_levels(struct smu_context *smu,
1867                          enum smu_clk_type clk_type,
1868                          uint32_t mask,
1869                          bool lock_needed)
1870 {
1871         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1872         int ret = 0;
1873
1874         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1875                 return -EOPNOTSUPP;
1876
1877         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1878                 pr_debug("force clock level is for dpm manual mode only.\n");
1879                 return -EINVAL;
1880         }
1881
1882         if (lock_needed)
1883                 mutex_lock(&smu->mutex);
1884
1885         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1886                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1887
1888         if (lock_needed)
1889                 mutex_unlock(&smu->mutex);
1890
1891         return ret;
1892 }
1893
1894 /*
1895  * On system suspend or reset, the dpm_enabled flag is
1896  * cleared so that SMU services which are no longer
1897  * supported get gated.
1898  * However, the mp1 state setting should still be allowed
1899  * even with dpm_enabled cleared.
1900  */
1901 int smu_set_mp1_state(struct smu_context *smu,
1902                       enum pp_mp1_state mp1_state)
1903 {
1904         uint16_t msg;
1905         int ret;
1906
1907         if (!smu->pm_enabled)
1908                 return -EOPNOTSUPP;
1909
1910         mutex_lock(&smu->mutex);
1911
1912         switch (mp1_state) {
1913         case PP_MP1_STATE_SHUTDOWN:
1914                 msg = SMU_MSG_PrepareMp1ForShutdown;
1915                 break;
1916         case PP_MP1_STATE_UNLOAD:
1917                 msg = SMU_MSG_PrepareMp1ForUnload;
1918                 break;
1919         case PP_MP1_STATE_RESET:
1920                 msg = SMU_MSG_PrepareMp1ForReset;
1921                 break;
1922         case PP_MP1_STATE_NONE:
1923         default:
1924                 mutex_unlock(&smu->mutex);
1925                 return 0;
1926         }
1927
1928         /* some asics may not support those messages */
1929         if (smu_msg_get_index(smu, msg) < 0) {
1930                 mutex_unlock(&smu->mutex);
1931                 return 0;
1932         }
1933
1934         ret = smu_send_smc_msg(smu, msg, NULL);
1935         if (ret)
1936                 pr_err("[PrepareMp1] Failed!\n");
1937
1938         mutex_unlock(&smu->mutex);
1939
1940         return ret;
1941 }
1942
1943 int smu_set_df_cstate(struct smu_context *smu,
1944                       enum pp_df_cstate state)
1945 {
1946         int ret = 0;
1947
1948         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1949                 return -EOPNOTSUPP;
1950
1951         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1952                 return 0;
1953
1954         mutex_lock(&smu->mutex);
1955
1956         ret = smu->ppt_funcs->set_df_cstate(smu, state);
1957         if (ret)
1958                 pr_err("[SetDfCstate] failed!\n");
1959
1960         mutex_unlock(&smu->mutex);
1961
1962         return ret;
1963 }
1964
1965 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
1966 {
1967         int ret = 0;
1968
1969         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1970                 return -EOPNOTSUPP;
1971
1972         if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
1973                 return 0;
1974
1975         mutex_lock(&smu->mutex);
1976
1977         ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
1978         if (ret)
1979                 pr_err("[AllowXgmiPowerDown] failed!\n");
1980
1981         mutex_unlock(&smu->mutex);
1982
1983         return ret;
1984 }
1985
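/*
 * Push the cached watermarks table (filled in by
 * smu_set_watermarks_for_clock_ranges()) down to the SMU firmware.
 */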
1986 int smu_write_watermarks_table(struct smu_context *smu)
1987 {
1988         void *watermarks_table = smu->smu_table.watermarks_table;
1989
1990         if (!watermarks_table)
1991                 return -EINVAL;
1992
1993         return smu_update_table(smu,
1994                                 SMU_TABLE_WATERMARKS,
1995                                 0,
1996                                 watermarks_table,
1997                                 true);
1998 }
1999
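/*
 * Cache the display watermarks for the given clock ranges and, on first
 * use, mark the table as existing but not yet loaded (WATERMARKS_EXIST
 * set, WATERMARKS_LOADED cleared); the actual upload to the SMU is
 * performed separately via smu_write_watermarks_table().
 */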
2000 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
2001                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
2002 {
2003         void *table = smu->smu_table.watermarks_table;
2004
2005         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2006                 return -EOPNOTSUPP;
2007
2008         if (!table)
2009                 return -EINVAL;
2010
2011         mutex_lock(&smu->mutex);
2012
2013         if (!smu->disable_watermark &&
2014                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
2015                         smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
2016                 smu_set_watermarks_table(smu, table, clock_ranges);
2017
2018                 if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
2019                         smu->watermarks_bitmap |= WATERMARKS_EXIST;
2020                         smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
2021                 }
2022         }
2023
2024         mutex_unlock(&smu->mutex);
2025
2026         return 0;
2027 }
2028
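/*
 * Notify the SMU of an AC<->DC power source change. Skipped when the DC
 * state is signalled to the firmware directly via GPIO
 * (dc_controlled_by_gpio).
 */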
2029 int smu_set_ac_dc(struct smu_context *smu)
2030 {
2031         int ret = 0;
2032
2033         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2034                 return -EOPNOTSUPP;
2035
2036         /* controlled by firmware */
2037         if (smu->dc_controlled_by_gpio)
2038                 return 0;
2039
2040         mutex_lock(&smu->mutex);
2041         ret = smu_set_power_source(smu,
2042                                    smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2043                                    SMU_POWER_SOURCE_DC);
2044         if (ret)
2045                 pr_err("Failed to switch to %s mode!\n",
2046                        smu->adev->pm.ac_power ? "AC" : "DC");
2047         mutex_unlock(&smu->mutex);
2048
2049         return ret;
2050 }
2051
2052 const struct amd_ip_funcs smu_ip_funcs = {
2053         .name = "smu",
2054         .early_init = smu_early_init,
2055         .late_init = smu_late_init,
2056         .sw_init = smu_sw_init,
2057         .sw_fini = smu_sw_fini,
2058         .hw_init = smu_hw_init,
2059         .hw_fini = smu_hw_fini,
2060         .suspend = smu_suspend,
2061         .resume = smu_resume,
2062         .is_idle = NULL,
2063         .check_soft_reset = NULL,
2064         .wait_for_idle = NULL,
2065         .soft_reset = NULL,
2066         .set_clockgating_state = smu_set_clockgating_state,
2067         .set_powergating_state = smu_set_powergating_state,
2068         .enable_umd_pstate = smu_enable_umd_pstate,
2069 };
2070
2071 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2072 {
2073         .type = AMD_IP_BLOCK_TYPE_SMC,
2074         .major = 11,
2075         .minor = 0,
2076         .rev = 0,
2077         .funcs = &smu_ip_funcs,
2078 };
2079
2080 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2081 {
2082         .type = AMD_IP_BLOCK_TYPE_SMC,
2083         .major = 12,
2084         .minor = 0,
2085         .rev = 0,
2086         .funcs = &smu_ip_funcs,
2087 };
2088
2089 int smu_load_microcode(struct smu_context *smu)
2090 {
2091         int ret = 0;
2092
2093         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2094                 return -EOPNOTSUPP;
2095
2096         mutex_lock(&smu->mutex);
2097
2098         if (smu->ppt_funcs->load_microcode)
2099                 ret = smu->ppt_funcs->load_microcode(smu);
2100
2101         mutex_unlock(&smu->mutex);
2102
2103         return ret;
2104 }
2105
2106 int smu_check_fw_status(struct smu_context *smu)
2107 {
2108         int ret = 0;
2109
2110         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2111                 return -EOPNOTSUPP;
2112
2113         mutex_lock(&smu->mutex);
2114
2115         if (smu->ppt_funcs->check_fw_status)
2116                 ret = smu->ppt_funcs->check_fw_status(smu);
2117
2118         mutex_unlock(&smu->mutex);
2119
2120         return ret;
2121 }
2122
2123 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2124 {
2125         int ret = 0;
2126
2127         mutex_lock(&smu->mutex);
2128
2129         if (smu->ppt_funcs->set_gfx_cgpg)
2130                 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2131
2132         mutex_unlock(&smu->mutex);
2133
2134         return ret;
2135 }
2136
2137 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
2138 {
2139         int ret = 0;
2140
2141         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2142                 return -EOPNOTSUPP;
2143
2144         mutex_lock(&smu->mutex);
2145
2146         if (smu->ppt_funcs->set_fan_speed_rpm)
2147                 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2148
2149         mutex_unlock(&smu->mutex);
2150
2151         return ret;
2152 }
2153
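/*
 * Query the power limit from the ppt backend (@def requests the default
 * limit). With lock_needed false the caller already holds smu->mutex, so
 * the dpm_enabled check is skipped as well.
 */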
2154 int smu_get_power_limit(struct smu_context *smu,
2155                         uint32_t *limit,
2156                         bool def,
2157                         bool lock_needed)
2158 {
2159         int ret = 0;
2160
2161         if (lock_needed) {
2162                 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2163                         return -EOPNOTSUPP;
2164
2165                 mutex_lock(&smu->mutex);
2166         }
2167
2168         if (smu->ppt_funcs->get_power_limit)
2169                 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2170
2171         if (lock_needed)
2172                 mutex_unlock(&smu->mutex);
2173
2174         return ret;
2175 }
2176
2177 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2178 {
2179         int ret = 0;
2180
2181         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2182                 return -EOPNOTSUPP;
2183
2184         mutex_lock(&smu->mutex);
2185
2186         if (smu->ppt_funcs->set_power_limit)
2187                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2188
2189         mutex_unlock(&smu->mutex);
2190
2191         return ret;
2192 }
2193
2194 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2195 {
2196         int ret = 0;
2197
2198         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2199                 return -EOPNOTSUPP;
2200
2201         mutex_lock(&smu->mutex);
2202
2203         if (smu->ppt_funcs->print_clk_levels)
2204                 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2205
2206         mutex_unlock(&smu->mutex);
2207
2208         return ret;
2209 }
2210
2211 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2212 {
2213         int ret = 0;
2214
2215         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2216                 return -EOPNOTSUPP;
2217
2218         mutex_lock(&smu->mutex);
2219
2220         if (smu->ppt_funcs->get_od_percentage)
2221                 ret = smu->ppt_funcs->get_od_percentage(smu, type);
2222
2223         mutex_unlock(&smu->mutex);
2224
2225         return ret;
2226 }
2227
2228 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2229 {
2230         int ret = 0;
2231
2232         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2233                 return -EOPNOTSUPP;
2234
2235         mutex_lock(&smu->mutex);
2236
2237         if (smu->ppt_funcs->set_od_percentage)
2238                 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2239
2240         mutex_unlock(&smu->mutex);
2241
2242         return ret;
2243 }
2244
2245 int smu_od_edit_dpm_table(struct smu_context *smu,
2246                           enum PP_OD_DPM_TABLE_COMMAND type,
2247                           long *input, uint32_t size)
2248 {
2249         int ret = 0;
2250
2251         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2252                 return -EOPNOTSUPP;
2253
2254         mutex_lock(&smu->mutex);
2255
2256         if (smu->ppt_funcs->od_edit_dpm_table)
2257                 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2258
2259         mutex_unlock(&smu->mutex);
2260
2261         return ret;
2262 }
2263
2264 int smu_read_sensor(struct smu_context *smu,
2265                     enum amd_pp_sensors sensor,
2266                     void *data, uint32_t *size)
2267 {
2268         int ret = 0;
2269
2270         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2271                 return -EOPNOTSUPP;
2272
2273         mutex_lock(&smu->mutex);
2274
2275         if (smu->ppt_funcs->read_sensor)
2276                 ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2277
2278         mutex_unlock(&smu->mutex);
2279
2280         return ret;
2281 }
2282
2283 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2284 {
2285         int ret = 0;
2286
2287         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2288                 return -EOPNOTSUPP;
2289
2290         mutex_lock(&smu->mutex);
2291
2292         if (smu->ppt_funcs->get_power_profile_mode)
2293                 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2294
2295         mutex_unlock(&smu->mutex);
2296
2297         return ret;
2298 }
2299
2300 int smu_set_power_profile_mode(struct smu_context *smu,
2301                                long *param,
2302                                uint32_t param_size,
2303                                bool lock_needed)
2304 {
2305         int ret = 0;
2306
2307         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2308                 return -EOPNOTSUPP;
2309
2310         if (lock_needed)
2311                 mutex_lock(&smu->mutex);
2312
2313         if (smu->ppt_funcs->set_power_profile_mode)
2314                 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2315
2316         if (lock_needed)
2317                 mutex_unlock(&smu->mutex);
2318
2319         return ret;
2320 }
2321
2322
2323 int smu_get_fan_control_mode(struct smu_context *smu)
2324 {
2325         int ret = 0;
2326
2327         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2328                 return -EOPNOTSUPP;
2329
2330         mutex_lock(&smu->mutex);
2331
2332         if (smu->ppt_funcs->get_fan_control_mode)
2333                 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2334
2335         mutex_unlock(&smu->mutex);
2336
2337         return ret;
2338 }
2339
2340 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2341 {
2342         int ret = 0;
2343
2344         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2345                 return -EOPNOTSUPP;
2346
2347         mutex_lock(&smu->mutex);
2348
2349         if (smu->ppt_funcs->set_fan_control_mode)
2350                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2351
2352         mutex_unlock(&smu->mutex);
2353
2354         return ret;
2355 }
2356
2357 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2358 {
2359         int ret = 0;
2360
2361         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2362                 return -EOPNOTSUPP;
2363
2364         mutex_lock(&smu->mutex);
2365
2366         if (smu->ppt_funcs->get_fan_speed_percent)
2367                 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2368
2369         mutex_unlock(&smu->mutex);
2370
2371         return ret;
2372 }
2373
2374 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2375 {
2376         int ret = 0;
2377
2378         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2379                 return -EOPNOTSUPP;
2380
2381         mutex_lock(&smu->mutex);
2382
2383         if (smu->ppt_funcs->set_fan_speed_percent)
2384                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2385
2386         mutex_unlock(&smu->mutex);
2387
2388         return ret;
2389 }
2390
2391 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2392 {
2393         int ret = 0;
2394
2395         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2396                 return -EOPNOTSUPP;
2397
2398         mutex_lock(&smu->mutex);
2399
2400         if (smu->ppt_funcs->get_fan_speed_rpm)
2401                 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2402
2403         mutex_unlock(&smu->mutex);
2404
2405         return ret;
2406 }
2407
2408 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2409 {
2410         int ret = 0;
2411
2412         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2413                 return -EOPNOTSUPP;
2414
2415         mutex_lock(&smu->mutex);
2416
2417         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
2418                 ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
2419
2420         mutex_unlock(&smu->mutex);
2421
2422         return ret;
2423 }
2424
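/*
 * No internal locking here: callers such as
 * smu_display_configuration_change() invoke this with smu->mutex already
 * held.
 */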
2425 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2426 {
2427         int ret = 0;
2428
2429         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2430                 return -EOPNOTSUPP;
2431
2432         if (smu->ppt_funcs->set_active_display_count)
2433                 ret = smu->ppt_funcs->set_active_display_count(smu, count);
2434
2435         return ret;
2436 }
2437
2438 int smu_get_clock_by_type(struct smu_context *smu,
2439                           enum amd_pp_clock_type type,
2440                           struct amd_pp_clocks *clocks)
2441 {
2442         int ret = 0;
2443
2444         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2445                 return -EOPNOTSUPP;
2446
2447         mutex_lock(&smu->mutex);
2448
2449         if (smu->ppt_funcs->get_clock_by_type)
2450                 ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2451
2452         mutex_unlock(&smu->mutex);
2453
2454         return ret;
2455 }
2456
2457 int smu_get_max_high_clocks(struct smu_context *smu,
2458                             struct amd_pp_simple_clock_info *clocks)
2459 {
2460         int ret = 0;
2461
2462         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2463                 return -EOPNOTSUPP;
2464
2465         mutex_lock(&smu->mutex);
2466
2467         if (smu->ppt_funcs->get_max_high_clocks)
2468                 ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2469
2470         mutex_unlock(&smu->mutex);
2471
2472         return ret;
2473 }
2474
2475 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2476                                        enum smu_clk_type clk_type,
2477                                        struct pp_clock_levels_with_latency *clocks)
2478 {
2479         int ret = 0;
2480
2481         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2482                 return -EOPNOTSUPP;
2483
2484         mutex_lock(&smu->mutex);
2485
2486         if (smu->ppt_funcs->get_clock_by_type_with_latency)
2487                 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2488
2489         mutex_unlock(&smu->mutex);
2490
2491         return ret;
2492 }
2493
2494 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2495                                        enum amd_pp_clock_type type,
2496                                        struct pp_clock_levels_with_voltage *clocks)
2497 {
2498         int ret = 0;
2499
2500         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2501                 return -EOPNOTSUPP;
2502
2503         mutex_lock(&smu->mutex);
2504
2505         if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2506                 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2507
2508         mutex_unlock(&smu->mutex);
2509
2510         return ret;
2511 }
2512
2513
2514 int smu_display_clock_voltage_request(struct smu_context *smu,
2515                                       struct pp_display_clock_request *clock_req)
2516 {
2517         int ret = 0;
2518
2519         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2520                 return -EOPNOTSUPP;
2521
2522         mutex_lock(&smu->mutex);
2523
2524         if (smu->ppt_funcs->display_clock_voltage_request)
2525                 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2526
2527         mutex_unlock(&smu->mutex);
2528
2529         return ret;
2530 }
2531
2532
2533 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2534 {
2535         int ret = -EINVAL;
2536
2537         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2538                 return -EOPNOTSUPP;
2539
2540         mutex_lock(&smu->mutex);
2541
2542         if (smu->ppt_funcs->display_disable_memory_clock_switch)
2543                 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2544
2545         mutex_unlock(&smu->mutex);
2546
2547         return ret;
2548 }
2549
2550 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2551 {
2552         int ret = 0;
2553
2554         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2555                 return -EOPNOTSUPP;
2556
2557         mutex_lock(&smu->mutex);
2558
2559         if (smu->ppt_funcs->notify_smu_enable_pwe)
2560                 ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2561
2562         mutex_unlock(&smu->mutex);
2563
2564         return ret;
2565 }
2566
2567 int smu_set_xgmi_pstate(struct smu_context *smu,
2568                         uint32_t pstate)
2569 {
2570         int ret = 0;
2571
2572         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2573                 return -EOPNOTSUPP;
2574
2575         mutex_lock(&smu->mutex);
2576
2577         if (smu->ppt_funcs->set_xgmi_pstate)
2578                 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2579
2580         mutex_unlock(&smu->mutex);
2581
2582         return ret;
2583 }
2584
2585 int smu_set_azalia_d3_pme(struct smu_context *smu)
2586 {
2587         int ret = 0;
2588
2589         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2590                 return -EOPNOTSUPP;
2591
2592         mutex_lock(&smu->mutex);
2593
2594         if (smu->ppt_funcs->set_azalia_d3_pme)
2595                 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2596
2597         mutex_unlock(&smu->mutex);
2598
2599         return ret;
2600 }
2601
2602 /*
2603  * On system suspend or reset, the dpm_enabled flag is
2604  * cleared so that SMU services which are no longer
2605  * supported get gated.
2606  *
2607  * However, the baco/mode1 reset should still be allowed
2608  * as it remains supported and necessary.
2609  */
2610 bool smu_baco_is_support(struct smu_context *smu)
2611 {
2612         bool ret = false;
2613
2614         if (!smu->pm_enabled)
2615                 return false;
2616
2617         mutex_lock(&smu->mutex);
2618
2619         if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2620                 ret = smu->ppt_funcs->baco_is_support(smu);
2621
2622         mutex_unlock(&smu->mutex);
2623
2624         return ret;
2625 }
2626
2627 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2628 {
2629         if (!smu->ppt_funcs->baco_get_state)
2630                 return -EINVAL;
2631
2632         mutex_lock(&smu->mutex);
2633         *state = smu->ppt_funcs->baco_get_state(smu);
2634         mutex_unlock(&smu->mutex);
2635
2636         return 0;
2637 }
2638
2639 int smu_baco_enter(struct smu_context *smu)
2640 {
2641         int ret = 0;
2642
2643         if (!smu->pm_enabled)
2644                 return -EOPNOTSUPP;
2645
2646         mutex_lock(&smu->mutex);
2647
2648         if (smu->ppt_funcs->baco_enter)
2649                 ret = smu->ppt_funcs->baco_enter(smu);
2650
2651         mutex_unlock(&smu->mutex);
2652
2653         return ret;
2654 }
2655
2656 int smu_baco_exit(struct smu_context *smu)
2657 {
2658         int ret = 0;
2659
2660         if (!smu->pm_enabled)
2661                 return -EOPNOTSUPP;
2662
2663         mutex_lock(&smu->mutex);
2664
2665         if (smu->ppt_funcs->baco_exit)
2666                 ret = smu->ppt_funcs->baco_exit(smu);
2667
2668         mutex_unlock(&smu->mutex);
2669
2670         return ret;
2671 }
2672
2673 int smu_mode2_reset(struct smu_context *smu)
2674 {
2675         int ret = 0;
2676
2677         if (!smu->pm_enabled)
2678                 return -EOPNOTSUPP;
2679
2680         mutex_lock(&smu->mutex);
2681
2682         if (smu->ppt_funcs->mode2_reset)
2683                 ret = smu->ppt_funcs->mode2_reset(smu);
2684
2685         mutex_unlock(&smu->mutex);
2686
2687         return ret;
2688 }
2689
2690 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2691                                          struct pp_smu_nv_clock_table *max_clocks)
2692 {
2693         int ret = 0;
2694
2695         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2696                 return -EOPNOTSUPP;
2697
2698         mutex_lock(&smu->mutex);
2699
2700         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2701                 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2702
2703         mutex_unlock(&smu->mutex);
2704
2705         return ret;
2706 }
2707
2708 int smu_get_uclk_dpm_states(struct smu_context *smu,
2709                             unsigned int *clock_values_in_khz,
2710                             unsigned int *num_states)
2711 {
2712         int ret = 0;
2713
2714         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2715                 return -EOPNOTSUPP;
2716
2717         mutex_lock(&smu->mutex);
2718
2719         if (smu->ppt_funcs->get_uclk_dpm_states)
2720                 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2721
2722         mutex_unlock(&smu->mutex);
2723
2724         return ret;
2725 }
2726
2727 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2728 {
2729         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2730
2731         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2732                 return -EOPNOTSUPP;
2733
2734         mutex_lock(&smu->mutex);
2735
2736         if (smu->ppt_funcs->get_current_power_state)
2737                 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2738
2739         mutex_unlock(&smu->mutex);
2740
2741         return pm_state;
2742 }
2743
2744 int smu_get_dpm_clock_table(struct smu_context *smu,
2745                             struct dpm_clocks *clock_table)
2746 {
2747         int ret = 0;
2748
2749         if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2750                 return -EOPNOTSUPP;
2751
2752         mutex_lock(&smu->mutex);
2753
2754         if (smu->ppt_funcs->get_dpm_clock_table)
2755                 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2756
2757         mutex_unlock(&smu->mutex);
2758
2759         return ret;
2760 }
2761
2762 uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
2763 {
2764         uint32_t ret = 0;
2765
2766         if (smu->ppt_funcs->get_pptable_power_limit)
2767                 ret = smu->ppt_funcs->get_pptable_power_limit(smu);
2768
2769         return ret;
2770 }
2771
2772 int smu_powergate_vcn(struct smu_context *smu, bool gate)
2773 {
2774         if (!smu->is_apu)
2775                 return 0;
2776
2777         return smu_dpm_set_uvd_enable(smu, !gate);
2778 }
2779
2780 int smu_powergate_jpeg(struct smu_context *smu, bool gate)
2781 {
2782         if (!smu->is_apu)
2783                 return 0;
2784
2785         return smu_dpm_set_jpeg_enable(smu, !gate);
2786 }