/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"

#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "renoir_ppt.h"

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char* __smu_message_names[] = {

const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
        if (type < 0 || type >= SMU_MSG_MAX_COUNT)
                return "unknown smu message";
        return __smu_message_names[type];

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char* __smu_feature_names[] = {

const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
        if (feature < 0 || feature >= SMU_FEATURE_COUNT)
                return "unknown smu feature";
        return __smu_feature_names[feature];

size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
        struct amdgpu_device *adev = smu->adev;
        uint32_t feature_mask[2] = { 0 };
        int32_t feature_index = 0;
        uint32_t sort_feature[SMU_FEATURE_COUNT];
        uint64_t hw_feature_count = 0;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
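
        /*
         * Example with hypothetical values: feature_mask = { 0x0000f3ff,
         * 0x00000001 } would print as
         * "features high: 0x00000001 low: 0x0000f3ff".
         */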
        size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
                       feature_mask[1], feature_mask[0]);

        for (i = 0; i < SMU_FEATURE_COUNT; i++) {
                feature_index = smu_feature_get_index(smu, i);
                if (feature_index < 0)

                sort_feature[feature_index] = i;

        for (i = 0; i < hw_feature_count; i++) {
                size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
                                smu_get_feature_name(smu, sort_feature[i]),
                                !!smu_feature_is_enabled(smu, sort_feature[i]) ?
                                "enabled" : "disabled");

        mutex_unlock(&smu->mutex);

static int smu_feature_update_enable_state(struct smu_context *smu,
                                           uint64_t feature_mask,

        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_low = 0, feature_high = 0;

        feature_low = (feature_mask >> 0) & 0xffffffff;
        feature_high = (feature_mask >> 32) & 0xffffffff;
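
        /*
         * For example, feature_mask = 0x0000000300001001 splits into
         * feature_low = 0x00001001 and feature_high = 0x00000003.
         */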

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,

        mutex_lock(&feature->mutex);

        bitmap_or(feature->enabled, feature->enabled,
                  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);

        bitmap_andnot(feature->enabled, feature->enabled,
                      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);

        mutex_unlock(&feature->mutex);

int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
        uint32_t feature_mask[2] = { 0 };
        uint64_t feature_2_enabled = 0;
        uint64_t feature_2_disabled = 0;
        uint64_t feature_enables = 0;
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);

        feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);

        feature_2_enabled = ~feature_enables & new_mask;
        feature_2_disabled = feature_enables & ~new_mask;
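
        /*
         * Worked example with hypothetical masks: for feature_enables = 0b1100
         * and new_mask = 0b1010, feature_2_enabled = ~0b1100 & 0b1010 = 0b0010
         * (the bits to turn on) and feature_2_disabled = 0b1100 & ~0b1010 =
         * 0b0100 (the bits to turn off).
         */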

        if (feature_2_enabled) {
                ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);

        if (feature_2_disabled) {
                ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);

        mutex_unlock(&smu->mutex);

int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)

        if (!if_version && !smu_version)

        if (smu->smc_fw_if_version && smu->smc_fw_version)

        *if_version = smu->smc_fw_if_version;

        *smu_version = smu->smc_fw_version;

        ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);

        smu->smc_fw_if_version = *if_version;

        ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);

        smu->smc_fw_version = *smu_version;

int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t min, uint32_t max, bool lock_needed)

        if (!smu_clk_dpm_is_enabled(smu, clk_type))

        mutex_lock(&smu->mutex);
        ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
        mutex_unlock(&smu->mutex);

int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t min, uint32_t max)
        int ret = 0, clk_id = 0;

        if (min <= 0 && max <= 0)

        if (!smu_clk_dpm_is_enabled(smu, clk_type))

        clk_id = smu_clk_get_index(smu, clk_type);
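
        /*
         * The message parameter packs the clock id into the high 16 bits and
         * the frequency into the low 16 bits, e.g. clk_id = 2 with
         * max = 1000 encodes as 0x000203e8 (illustrative values).
         */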
        param = (uint32_t)((clk_id << 16) | (max & 0xffff));
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,

        param = (uint32_t)((clk_id << 16) | (min & 0xffff));
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,

int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                           uint32_t *min, uint32_t *max, bool lock_needed)
        uint32_t clock_limit;

        mutex_lock(&smu->mutex);

        if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
                clock_limit = smu->smu_table.boot_values.uclk;

                clock_limit = smu->smu_table.boot_values.gfxclk;

                clock_limit = smu->smu_table.boot_values.socclk;

                /* boot clock values from vbios are in 10 KHz unit; convert to MHz */
                *min = clock_limit / 100;

                *max = clock_limit / 100;

        /*
         * TODO: Have each ASIC (ASIC ppt funcs) control the callbacks exposed
         * to the core driver, and provide helpers for the functionality that
         * is common across SMU_v11_x and SMU_v12_x funcs.
         */
        ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);

        mutex_unlock(&smu->mutex);

int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
                              uint16_t level, uint32_t *value)
        int ret = 0, clk_id = 0;

        if (!smu_clk_dpm_is_enabled(smu, clk_type))

        clk_id = smu_clk_get_index(smu, clk_type);

        param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,

        /*
         * BIT31: 0 - fine-grained DPM, 1 - discrete DPM.
         * We do not support discrete DPM for now, so mask the bit off.
         */
        *value = *value & 0x7fffffff;
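
/*
 * Requesting DPM level 0xff is the firmware convention for querying the
 * number of DPM levels rather than the frequency of a specific level.
 */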
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
        return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);

int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t *min_value, uint32_t *max_value)
        uint32_t level_count = 0;

        if (!min_value && !max_value)

        /* use the level-0 clock value as the min value by default */
        ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);

        ret = smu_get_dpm_level_count(smu, clk_type, &level_count);

        ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);

bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
        enum smu_feature_mask feature_id = 0;

        feature_id = SMU_FEATURE_DPM_UCLK_BIT;

        feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;

        feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;

        if (!smu_feature_is_enabled(smu, feature_id)) {

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API performs no smu->mutex locking because:
 * 1. It is called by other IP blocks (gfx/sdma/vcn/uvd/vce), and the
 *    caller guarantees the calls are race-condition free.
 * 2. Or it is called on a user request to set
 *    power_dpm_force_performance_level, in which case the smu->mutex
 *    protection is already enforced by the parent API
 *    smu_force_performance_level in the call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,

        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
                ret = smu_dpm_set_uvd_enable(smu, !gate);

        case AMD_IP_BLOCK_TYPE_VCE:
                ret = smu_dpm_set_vce_enable(smu, !gate);

        case AMD_IP_BLOCK_TYPE_GFX:
                ret = smu_gfx_off_control(smu, gate);

        case AMD_IP_BLOCK_TYPE_SDMA:
                ret = smu_powergate_sdma(smu, gate);

        case AMD_IP_BLOCK_TYPE_JPEG:
                ret = smu_dpm_set_jpeg_enable(smu, !gate);

int smu_get_power_num_states(struct smu_context *smu,
                             struct pp_states_info *state_info)

        /* power states are not supported */
        memset(state_info, 0, sizeof(struct pp_states_info));
        state_info->nums = 1;
        state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;

        case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
                *((uint32_t *)data) = smu->pstate_sclk;

        case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
                *((uint32_t *)data) = smu->pstate_mclk;

        case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
                ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);

        case AMDGPU_PP_SENSOR_UVD_POWER:
                *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;

        case AMDGPU_PP_SENSOR_VCE_POWER:
                *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;

        case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
                *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;

int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
                     void *table_data, bool drv2smu)
        struct smu_table_context *smu_table = &smu->smu_table;
        struct amdgpu_device *adev = smu->adev;
        struct smu_table *table = &smu_table->driver_table;
        int table_id = smu_table_get_index(smu, table_index);

        if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)

        table_size = smu_table->tables[table_index].size;

        memcpy(table->cpu_addr, table_data, table_size);
        /*
         * Flush the hdp cache: guarantees that the content seen by the
         * GPU is consistent with the CPU.
         */
        amdgpu_asic_flush_hdp(adev, NULL);
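
        /*
         * The message parameter packs the optional argument into the high
         * 16 bits and the table id into the low 16 bits, e.g. table_id = 1
         * with argument = 0 encodes as 0x00000001 (illustrative values).
         */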
        ret = smu_send_smc_msg_with_param(smu, drv2smu ?
                                          SMU_MSG_TransferTableDram2Smu :
                                          SMU_MSG_TransferTableSmu2Dram,
                                          table_id | ((argument & 0xFFFF) << 16),

        amdgpu_asic_flush_hdp(adev, NULL);
        memcpy(table_data, table->cpu_addr, table_size);

bool is_support_sw_smu(struct amdgpu_device *adev)
        if (adev->asic_type == CHIP_VEGA20)
                return amdgpu_dpm == 2;
        else if (adev->asic_type >= CHIP_ARCTURUS) {
                if (amdgpu_sriov_is_pp_one_vf(adev) || !amdgpu_sriov_vf(adev))

bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
        if (!is_support_sw_smu(adev))

        if (adev->asic_type == CHIP_VEGA20)

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
        struct smu_table_context *smu_table = &smu->smu_table;
        struct amdgpu_device *adev = smu->adev;
        uint32_t powerplay_table_size;

        if (!adev->pm.dpm_enabled)

        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)

        mutex_lock(&smu->mutex);

        if (smu_table->hardcode_pptable)
                *table = smu_table->hardcode_pptable;

                *table = smu_table->power_play_table;

        powerplay_table_size = smu_table->power_play_table_size;

        mutex_unlock(&smu->mutex);

        return powerplay_table_size;

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
        struct smu_table_context *smu_table = &smu->smu_table;
        struct amdgpu_device *adev = smu->adev;
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;

        if (!adev->pm.dpm_enabled)

        if (header->usStructureSize != size) {
                pr_err("pp table size not matched!\n");

        mutex_lock(&smu->mutex);
        if (!smu_table->hardcode_pptable)
                smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
        if (!smu_table->hardcode_pptable) {

        memcpy(smu_table->hardcode_pptable, buf, size);
        smu_table->power_play_table = smu_table->hardcode_pptable;
        smu_table->power_play_table_size = size;

        /*
         * A special hw_fini action (on Navi1x the DPM disablement is
         * skipped) may be needed for custom pptable uploading.
         */
        smu->uploading_custom_pp_table = true;

        ret = smu_reset(smu);
        if (ret)
                pr_info("smu reset failed, ret = %d\n", ret);

        smu->uploading_custom_pp_table = false;

        mutex_unlock(&smu->mutex);

int smu_feature_init_dpm(struct smu_context *smu)
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

        mutex_lock(&feature->mutex);
        bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);

        ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,

        mutex_lock(&feature->mutex);
        bitmap_or(feature->allowed, feature->allowed,
                  (unsigned long *)allowed_feature_mask,
                  feature->feature_num);
        mutex_unlock(&feature->mutex);

int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
        struct smu_feature *feature = &smu->smu_feature;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->enabled);
        mutex_unlock(&feature->mutex);

int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,

        struct smu_feature *feature = &smu->smu_feature;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id > feature->feature_num);

        return smu_feature_update_enable_state(smu,

int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
        struct smu_feature *feature = &smu->smu_feature;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->supported);
        mutex_unlock(&feature->mutex);

int smu_feature_set_supported(struct smu_context *smu,
                              enum smu_feature_mask mask,

        struct smu_feature *feature = &smu->smu_feature;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);

        test_and_set_bit(feature_id, feature->supported);

        test_and_clear_bit(feature_id, feature->supported);
        mutex_unlock(&feature->mutex);

static int smu_set_funcs(struct amdgpu_device *adev)
        struct smu_context *smu = &adev->smu;

        if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                smu->od_enabled = true;

        switch (adev->asic_type) {

                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
                vega20_set_ppt_funcs(smu);

                navi10_set_ppt_funcs(smu);

                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
                arcturus_set_ppt_funcs(smu);
                /* OD is not supported on Arcturus */
                smu->od_enabled = false;

                renoir_set_ppt_funcs(smu);

static int smu_early_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        smu->pm_enabled = !!amdgpu_dpm;

        mutex_init(&smu->mutex);

        return smu_set_funcs(adev);

static int smu_late_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (!smu->pm_enabled)

        smu_handle_task(&adev->smu,
                        smu->smu_dpm.dpm_level,
                        AMD_PP_TASK_COMPLETE_INIT,

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
                            uint16_t *size, uint8_t *frev, uint8_t *crev,

        struct amdgpu_device *adev = smu->adev;

        if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
                                           size, frev, crev, &data_start))

        *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

static int smu_initialize_pptable(struct smu_context *smu)

static int smu_smc_table_sw_init(struct smu_context *smu)

        ret = smu_initialize_pptable(smu);
        if (ret)
                pr_err("Failed to initialize pptable!\n");

        /*
         * Create the smu_table structure and init smc tables such as
         * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
         */
        ret = smu_init_smc_tables(smu);
        if (ret)
                pr_err("Failed to init smc tables!\n");

        /*
         * Create the smu_power_context structure and allocate the
         * smu_dpm_context to fill in the smu_power_context data.
         */
        ret = smu_init_power(smu);
        if (ret)
                pr_err("Failed to init smu power!\n");

static int smu_smc_table_sw_fini(struct smu_context *smu)

        ret = smu_fini_smc_tables(smu);
        if (ret)
                pr_err("Failed to fini smc tables!\n");

static int smu_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        smu->pool_size = adev->pm.smu_prv_buffer_size;
        smu->smu_feature.feature_num = SMU_FEATURE_MAX;
        mutex_init(&smu->smu_feature.mutex);
        bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

        mutex_init(&smu->smu_baco.mutex);
        smu->smu_baco.state = SMU_BACO_STATE_EXIT;
        smu->smu_baco.platform_support = false;

        mutex_init(&smu->sensor_lock);
        mutex_init(&smu->metrics_lock);
        mutex_init(&smu->message_lock);

        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
        smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
        smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
        smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
        smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
        smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
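        /*
         * With PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT at priority 0, the
         * initial workload_mask computed above is 1 << 0 == 0x1.
         */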

        smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
        smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
        smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
        smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
        smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
        smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
        smu->display_config = &adev->pm.pm_display_cfg;

        smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        ret = smu_init_microcode(smu);
        if (ret)
                pr_err("Failed to load smu firmware!\n");

        ret = smu_smc_table_sw_init(smu);
        if (ret)
                pr_err("Failed to sw init smc table!\n");

        ret = smu_register_irq_handler(smu);
        if (ret)
                pr_err("Failed to register smc irq handler!\n");

static int smu_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        kfree(smu->irq_source);
        smu->irq_source = NULL;

        ret = smu_smc_table_sw_fini(smu);
        if (ret)
                pr_err("Failed to sw fini smc table!\n");

        ret = smu_fini_power(smu);
        if (ret)
                pr_err("Failed to fini smu power!\n");

static int smu_init_fb_allocations(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        struct smu_table *driver_table = &(smu_table->driver_table);
        uint32_t max_table_size = 0;

        /* VRAM allocation for tool table */
        if (tables[SMU_TABLE_PMSTATUSLOG].size) {
                ret = amdgpu_bo_create_kernel(adev,
                                              tables[SMU_TABLE_PMSTATUSLOG].size,
                                              tables[SMU_TABLE_PMSTATUSLOG].align,
                                              tables[SMU_TABLE_PMSTATUSLOG].domain,
                                              &tables[SMU_TABLE_PMSTATUSLOG].bo,
                                              &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                              &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
                if (ret)
                        pr_err("VRAM allocation for tool table failed!\n");

        /* VRAM allocation for driver table */
        for (i = 0; i < SMU_TABLE_COUNT; i++) {
                if (tables[i].size == 0)

                if (i == SMU_TABLE_PMSTATUSLOG)

                if (max_table_size < tables[i].size)
                        max_table_size = tables[i].size;
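
        /*
         * The shared driver table is sized for the largest table, e.g. with
         * a 0x4000-byte pptable and a 0x1000-byte watermarks table
         * (illustrative sizes) max_table_size ends up as 0x4000.
         */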
        driver_table->size = max_table_size;
        driver_table->align = PAGE_SIZE;
        driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

        ret = amdgpu_bo_create_kernel(adev,
                                      driver_table->align,
                                      driver_table->domain,
                                      &driver_table->mc_address,
                                      &driver_table->cpu_addr);
        if (ret) {
                pr_err("VRAM allocation for driver table failed!\n");
                if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
                        amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
                                              &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                              &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

static int smu_fini_fb_allocations(struct smu_context *smu)
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        struct smu_table *driver_table = &(smu_table->driver_table);

        if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
                amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
                                      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

        amdgpu_bo_free_kernel(&driver_table->bo,
                              &driver_table->mc_address,
                              &driver_table->cpu_addr);

static int smu_smc_table_hw_init(struct smu_context *smu,

        struct amdgpu_device *adev = smu->adev;

        if (smu_is_dpm_running(smu) && adev->in_suspend) {
                pr_info("dpm has been enabled\n");

        if (adev->asic_type != CHIP_ARCTURUS) {
                ret = smu_init_display_count(smu, 0);

        /* get boot_values from vbios to set revision, gfxclk, etc. */
        ret = smu_get_vbios_bootup_values(smu);

        ret = smu_setup_pptable(smu);

        ret = smu_get_clk_info_from_vbios(smu);

        /*
         * Check that the format_revision in vbios matches the pptable header
         * version, and that the structure size is not 0.
         */
        ret = smu_check_pptable(smu);

        /*
         * Allocate vram bos to store smc table contents.
         */
        ret = smu_init_fb_allocations(smu);

        /*
         * Parse the pptable format and fill the PPTable_t smc_pptable in the
         * smu_table_context structure. Then read the smc_dpm_table from vbios
         * and fill it into smc_pptable as well.
         */
        ret = smu_parse_pptable(smu);

        /*
         * Send msg GetDriverIfVersion to check if the return value is equal
         * to the DRIVER_IF_VERSION in the smc header.
         */
        ret = smu_check_fw_version(smu);

        ret = smu_set_driver_table_location(smu);

        /* smu_dump_pptable(smu); */
        if (!amdgpu_sriov_vf(adev)) {
                /*
                 * Copy the pptable bo in vram to smc with SMU MSGs such as
                 * SetDriverDramAddr and TransferTableDram2Smu.
                 */
                ret = smu_write_pptable(smu);

        /* issue Run*Btc msg */
        ret = smu_run_btc(smu);

        ret = smu_feature_set_allowed_mask(smu);

        ret = smu_system_features_control(smu, true);

        if (adev->asic_type == CHIP_NAVI10) {
                if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
                                                      adev->pdev->revision == 0xc3 ||
                                                      adev->pdev->revision == 0xca ||
                                                      adev->pdev->revision == 0xcb)) ||
                    (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
                                                      adev->pdev->revision == 0xf4 ||
                                                      adev->pdev->revision == 0xf5 ||
                                                      adev->pdev->revision == 0xf6))) {
                        ret = smu_disable_umc_cdr_12gbps_workaround(smu);
                        if (ret)
                                pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");

        if (smu->ppt_funcs->set_power_source) {
                /*
                 * For Navi1X, manually switch it to AC mode as PMFW
                 * may boot it with DC mode.
                 */
                if (adev->pm.ac_power)
                        ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);

                        ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
                if (ret)
                        pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");

        if (adev->asic_type != CHIP_ARCTURUS) {
                ret = smu_notify_display_change(smu);

                /*
                 * Set min deep sleep dcefclk with the bootup value from vbios
                 * via the SetMinDeepSleepDcefclk MSG.
                 */
                ret = smu_set_min_dcef_deep_sleep(smu);

        /*
         * Set initialized values (read from vbios) into the dpm tables
         * context such as gfxclk, memclk and dcefclk, and enable the DPM
         * feature for each type of clock.
         */
        ret = smu_populate_smc_tables(smu);

        ret = smu_init_max_sustainable_clocks(smu);

        if (adev->asic_type != CHIP_ARCTURUS) {
                ret = smu_override_pcie_parameters(smu);

        ret = smu_set_default_od_settings(smu, initialize);

        ret = smu_populate_umd_state_clk(smu);

        ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);

        /*
         * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr MSG
         * for tools.
         */
        if (!amdgpu_sriov_vf(adev)) {
                ret = smu_set_tool_table_location(smu);

        if (!smu_is_dpm_running(smu))
                pr_info("dpm has been disabled\n");

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is reserved for SMC use; the SetSystemVirtualDramAddr
 * and DramLogSetDramAddr messages notify the SMC of its address.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        uint64_t pool_size = smu->pool_size;

        if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)

        memory_pool->size = pool_size;
        memory_pool->align = PAGE_SIZE;
        memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
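
        /*
         * Only the fixed pool sizes below are accepted; pool_size originates
         * from the amdgpu_smu_memory_pool_size module parameter (zero means
         * the pool is disabled).
         */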
        switch (pool_size) {
        case SMU_MEMORY_POOL_SIZE_256_MB:
        case SMU_MEMORY_POOL_SIZE_512_MB:
        case SMU_MEMORY_POOL_SIZE_1_GB:
        case SMU_MEMORY_POOL_SIZE_2_GB:
                ret = amdgpu_bo_create_kernel(adev,
                                              memory_pool->domain,
                                              &memory_pool->mc_address,
                                              &memory_pool->cpu_addr);

static int smu_free_memory_pool(struct smu_context *smu)
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;

        if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)

        amdgpu_bo_free_kernel(&memory_pool->bo,
                              &memory_pool->mc_address,
                              &memory_pool->cpu_addr);

        memset(memory_pool, 0, sizeof(struct smu_table));

static int smu_start_smc_engine(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                if (adev->asic_type < CHIP_NAVI10) {
                        if (smu->ppt_funcs->load_microcode) {
                                ret = smu->ppt_funcs->load_microcode(smu);

        if (smu->ppt_funcs->check_fw_status) {
                ret = smu->ppt_funcs->check_fw_status(smu);
                if (ret)
                        pr_err("SMC is not ready\n");

static int smu_hw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))

        ret = smu_start_smc_engine(smu);
        if (ret)
                pr_err("SMU is not ready yet!\n");

        smu_powergate_sdma(&adev->smu, false);
        smu_powergate_vcn(&adev->smu, false);
        smu_powergate_jpeg(&adev->smu, false);
        smu_set_gfx_cgpg(&adev->smu, true);

        if (!smu->pm_enabled)

        ret = smu_feature_init_dpm(smu);

        ret = smu_smc_table_hw_init(smu, true);

        ret = smu_alloc_memory_pool(smu);

        /*
         * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr msgs to
         * notify the SMC of the memory pool location.
         */
        ret = smu_notify_memory_pool_location(smu);

        ret = smu_start_thermal_control(smu);

        ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);

        adev->pm.dpm_enabled = true;

        pr_info("SMU is initialized successfully!\n");

static int smu_stop_dpms(struct smu_context *smu)
        if (amdgpu_sriov_vf(smu->adev))

        return smu_system_features_control(smu, false);

static int smu_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        struct smu_table_context *table_context = &smu->smu_table;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))

        smu_powergate_sdma(&adev->smu, true);
        smu_powergate_vcn(&adev->smu, true);
        smu_powergate_jpeg(&adev->smu, true);

        if (!smu->pm_enabled)

        adev->pm.dpm_enabled = false;

        smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);

        if (!amdgpu_sriov_vf(adev)) {
                ret = smu_stop_thermal_control(smu);
                if (ret)
                        pr_warn("Failed to stop thermal control!\n");

        /*
         * For custom pptable uploading, skip the DPM features
         * disable process on Navi1x ASICs.
         *   - As the gfx related features are under control of
         *     RLC on those ASICs, RLC reinitialization will be
         *     needed to reenable them, which will cost much more
         *     effort.
         *   - SMU firmware can handle the DPM reenablement
         *     properly.
         */
        if (!smu->uploading_custom_pp_table ||
            !((adev->asic_type >= CHIP_NAVI10) &&
              (adev->asic_type <= CHIP_NAVI12))) {
                ret = smu_stop_dpms(smu);
                if (ret)
                        pr_warn("Failed to stop DPMs!\n");

        kfree(table_context->driver_pptable);
        table_context->driver_pptable = NULL;

        kfree(table_context->max_sustainable_clocks);
        table_context->max_sustainable_clocks = NULL;

        kfree(table_context->overdrive_table);
        table_context->overdrive_table = NULL;

        ret = smu_fini_fb_allocations(smu);

        ret = smu_free_memory_pool(smu);

int smu_reset(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;

        ret = smu_hw_fini(adev);

        ret = smu_hw_init(adev);

static int smu_disable_dpm(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        uint32_t smu_version;

        bool use_baco = !smu->is_apu &&
                ((adev->in_gpu_reset &&
                  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
                 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));

        ret = smu_get_smc_version(smu, NULL, &smu_version);
        if (ret)
                pr_err("Failed to get smu version.\n");

        /*
         * Disable all enabled SMU features.
         * Ideally this should be handled in SMU FW; as a backup, the
         * driver can issue the call to SMU FW until the sequence in
         * SMU FW is operational.
         */
        ret = smu_system_features_control(smu, false);
        if (ret)
                pr_err("Failed to disable smu features.\n");

        /*
         * Arcturus does not have the BACO bit in the disable feature mask.
         * Enablement of the BACO bit on Arcturus should be skipped.
         */
        if (adev->asic_type == CHIP_ARCTURUS) {
                if (use_baco && (smu_version > 0x360e00))

        /* For baco, we need to leave the BACO feature enabled */
        /*
         * The correct way of checking whether SMU_FEATURE_BACO_BIT is
         * enabled:
         *
         * 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' will
         * always return false, as 'smu_system_features_control(smu, false)'
         * was just issued above, which disabled all SMU features.
         *
         * Thus 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' is used
         * for the checking now.
         */
        if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
                ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
                if (ret)
                        pr_warn("set BACO feature enabled failed, return %d\n", ret);

static int smu_suspend(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))

        if (!smu->pm_enabled)

        adev->pm.dpm_enabled = false;

        smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);

        if (!amdgpu_sriov_vf(adev)) {
                ret = smu_disable_dpm(smu);

        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

        if (adev->asic_type >= CHIP_NAVI10 &&
            adev->gfx.rlc.funcs->stop)
                adev->gfx.rlc.funcs->stop(adev);

        smu_set_gfx_cgpg(&adev->smu, false);

static int smu_resume(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))

        if (!smu->pm_enabled)

        pr_info("SMU is resuming...\n");

        ret = smu_start_smc_engine(smu);
        if (ret)
                pr_err("SMU is not ready yet!\n");

        ret = smu_smc_table_hw_init(smu, false);

        ret = smu_start_thermal_control(smu);

        ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);

        smu_set_gfx_cgpg(&adev->smu, true);

        smu->disable_uclk_switch = 0;

        adev->pm.dpm_enabled = true;

        pr_info("SMU is resumed successfully!\n");

int smu_display_configuration_change(struct smu_context *smu,
                                     const struct amd_pp_display_configuration *display_config)
        struct amdgpu_device *adev = smu->adev;
        int num_of_active_display = 0;

        if (!adev->pm.dpm_enabled)

        if (!is_support_sw_smu(smu->adev))

        if (!display_config)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_deep_sleep_dcefclk)
                smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
                                display_config->min_dcef_deep_sleep_set_clk / 100);

        for (index = 0; index < display_config->num_path_including_non_display; index++) {
                if (display_config->displays[index].controller_id != 0)
                        num_of_active_display++;

        smu_set_active_display_count(smu, num_of_active_display);

        smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
                           display_config->cpu_cc6_disable,
                           display_config->cpu_pstate_disable,
                           display_config->nb_pstate_switch_disable);

        mutex_unlock(&smu->mutex);

static int smu_get_clock_info(struct smu_context *smu,
                              struct smu_clock_info *clk_info,
                              enum smu_perf_level_designation designation)
        struct smu_performance_level level = {0};

        ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);

        clk_info->min_mem_clk = level.memory_clock;
        clk_info->min_eng_clk = level.core_clock;
        clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

        ret = smu_get_perf_level(smu, designation, &level);

        clk_info->min_mem_clk = level.memory_clock;
        clk_info->min_eng_clk = level.core_clock;
        clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

int smu_get_current_clocks(struct smu_context *smu,
                           struct amd_pp_clock_info *clocks)
        struct amd_pp_simple_clock_info simple_clocks = {0};
        struct amdgpu_device *adev = smu->adev;
        struct smu_clock_info hw_clocks;

        if (!is_support_sw_smu(smu->adev))

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        smu_get_dal_power_level(smu, &simple_clocks);

        if (smu->support_power_containment)
                ret = smu_get_clock_info(smu, &hw_clocks,
                                         PERF_LEVEL_POWER_CONTAINMENT);

                ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

        if (ret)
                pr_err("Error in smu_get_clock_info\n");

        clocks->min_engine_clock = hw_clocks.min_eng_clk;
        clocks->max_engine_clock = hw_clocks.max_eng_clk;
        clocks->min_memory_clock = hw_clocks.min_mem_clk;
        clocks->max_memory_clock = hw_clocks.max_mem_clk;
        clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
        clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
        clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
        clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

        if (simple_clocks.level == 0)
                clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;

                clocks->max_clocks_state = simple_clocks.level;

        if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
                clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
                clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

        mutex_unlock(&smu->mutex);

static int smu_set_clockgating_state(void *handle,
                                     enum amd_clockgating_state state)

static int smu_set_powergating_state(void *handle,
                                     enum amd_powergating_state state)

static int smu_enable_umd_pstate(void *handle,
                                 enum amd_dpm_forced_level *level)
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        struct smu_context *smu = (struct smu_context*)(handle);
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)

        if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
                /* enter umd pstate, save current level, disable gfx cg */
                if (*level & profile_mode_mask) {
                        smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = true;
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_UNGATE);
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_UNGATE);

                /* exit umd pstate, restore level, enable gfx cg */
                if (!(*level & profile_mode_mask)) {
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = smu_dpm_ctx->saved_dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = false;
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,

int smu_adjust_power_state_dynamic(struct smu_context *smu,
                                   enum amd_dpm_forced_level level,
                                   bool skip_display_settings)
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret)
                        pr_err("Failed to change display config!\n");

        ret = smu_apply_clocks_adjust_rules(smu);
        if (ret)
                pr_err("Failed to apply clocks adjust rules!\n");

        if (!skip_display_settings) {
                ret = smu_notify_smc_display_config(smu);
                if (ret)
                        pr_err("Failed to notify smc display config!\n");

        if (smu_dpm_ctx->dpm_level != level) {
                ret = smu_asic_set_performance_level(smu, level);
                if (ret)
                        pr_err("Failed to set performance level!\n");

                /* update the saved copy */
                smu_dpm_ctx->dpm_level = level;

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
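                /*
                 * fls() returns the position of the highest set bit, so the
                 * pending workload with the largest priority index wins:
                 * e.g. workload_mask = 0x5 gives fls() = 3 and index = 2.
                 */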
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];

                if (smu->power_profile_mode != workload)
                        smu_set_power_profile_mode(smu, &workload, 0, false);

int smu_handle_task(struct smu_context *smu,
                    enum amd_dpm_forced_level level,
                    enum amd_pp_task task_id,

        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
                ret = smu_pre_display_config_changed(smu);

                ret = smu_set_cpu_power_state(smu);

                ret = smu_adjust_power_state_dynamic(smu, level, false);

        case AMD_PP_TASK_COMPLETE_INIT:
        case AMD_PP_TASK_READJUST_POWER_STATE:
                ret = smu_adjust_power_state_dynamic(smu, level, true);

        mutex_unlock(&smu->mutex);

int smu_switch_power_profile(struct smu_context *smu,
                             enum PP_SMC_POWER_PROFILE type,

        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        if (type >= PP_SMC_POWER_PROFILE_CUSTOM)

        mutex_lock(&smu->mutex);

        smu->workload_mask &= ~(1 << smu->workload_prority[type]);
        index = fls(smu->workload_mask);
        index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
        workload = smu->workload_setting[index];

        smu->workload_mask |= (1 << smu->workload_prority[type]);
        index = fls(smu->workload_mask);
        index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
        workload = smu->workload_setting[index];

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
                smu_set_power_profile_mode(smu, &workload, 0, false);

        mutex_unlock(&smu->mutex);

enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        struct amdgpu_device *adev = smu->adev;
        enum amd_dpm_forced_level level;

        if (!adev->pm.dpm_enabled)

        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)

        mutex_lock(&(smu->mutex));
        level = smu_dpm_ctx->dpm_level;
        mutex_unlock(&(smu->mutex));

int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)

        mutex_lock(&smu->mutex);

        ret = smu_enable_umd_pstate(smu, &level);
        if (ret) {
                mutex_unlock(&smu->mutex);

        ret = smu_handle_task(smu, level,
                              AMD_PP_TASK_READJUST_POWER_STATE,

        mutex_unlock(&smu->mutex);

int smu_set_display_count(struct smu_context *smu, uint32_t count)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);
        ret = smu_init_display_count(smu, count);
        mutex_unlock(&smu->mutex);

int smu_force_clk_levels(struct smu_context *smu,
                         enum smu_clk_type clk_type,

        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                pr_debug("force clock level is for dpm manual mode only.\n");

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
                ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

        mutex_unlock(&smu->mutex);

/*
 * On system suspend or reset, the dpm_enabled flag is cleared, so that
 * those SMU services which are not supported get gated. However, the
 * mp1 state setting should still be granted even with dpm_enabled
 * cleared.
 */
int smu_set_mp1_state(struct smu_context *smu,
                      enum pp_mp1_state mp1_state)

        mutex_lock(&smu->mutex);

        switch (mp1_state) {
        case PP_MP1_STATE_SHUTDOWN:
                msg = SMU_MSG_PrepareMp1ForShutdown;

        case PP_MP1_STATE_UNLOAD:
                msg = SMU_MSG_PrepareMp1ForUnload;

        case PP_MP1_STATE_RESET:
                msg = SMU_MSG_PrepareMp1ForReset;

        case PP_MP1_STATE_NONE:

                mutex_unlock(&smu->mutex);

        /* some asics may not support those messages */
        if (smu_msg_get_index(smu, msg) < 0) {
                mutex_unlock(&smu->mutex);

        ret = smu_send_smc_msg(smu, msg, NULL);
        if (ret)
                pr_err("[PrepareMp1] Failed!\n");

        mutex_unlock(&smu->mutex);

int smu_set_df_cstate(struct smu_context *smu,
                      enum pp_df_cstate state)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)

        mutex_lock(&smu->mutex);

        ret = smu->ppt_funcs->set_df_cstate(smu, state);
        if (ret)
                pr_err("[SetDfCstate] failed!\n");

        mutex_unlock(&smu->mutex);

int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)

        mutex_lock(&smu->mutex);

        ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
        if (ret)
                pr_err("[AllowXgmiPowerDown] failed!\n");

        mutex_unlock(&smu->mutex);

int smu_write_watermarks_table(struct smu_context *smu)
        void *watermarks_table = smu->smu_table.watermarks_table;

        if (!watermarks_table)

        return smu_update_table(smu,
                                SMU_TABLE_WATERMARKS,

int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
                                        struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
        void *table = smu->smu_table.watermarks_table;
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (!smu->disable_watermark &&
            smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
            smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                smu_set_watermarks_table(smu, table, clock_ranges);

                if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
                        smu->watermarks_bitmap |= WATERMARKS_EXIST;
                        smu->watermarks_bitmap &= ~WATERMARKS_LOADED;

        mutex_unlock(&smu->mutex);

int smu_set_ac_dc(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        /* controlled by firmware */
        if (smu->dc_controlled_by_gpio)

        mutex_lock(&smu->mutex);
        if (smu->ppt_funcs->set_power_source) {
                if (smu->adev->pm.ac_power)
                        ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);

                        ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
                if (ret)
                        pr_err("Failed to switch to %s mode!\n",
                               smu->adev->pm.ac_power ? "AC" : "DC");
        mutex_unlock(&smu->mutex);

const struct amd_ip_funcs smu_ip_funcs = {
        .early_init = smu_early_init,
        .late_init = smu_late_init,
        .sw_init = smu_sw_init,
        .sw_fini = smu_sw_fini,
        .hw_init = smu_hw_init,
        .hw_fini = smu_hw_fini,
        .suspend = smu_suspend,
        .resume = smu_resume,
        .check_soft_reset = NULL,
        .wait_for_idle = NULL,
        .set_clockgating_state = smu_set_clockgating_state,
        .set_powergating_state = smu_set_powergating_state,
        .enable_umd_pstate = smu_enable_umd_pstate,

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
        .type = AMD_IP_BLOCK_TYPE_SMC,

        .funcs = &smu_ip_funcs,

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
        .type = AMD_IP_BLOCK_TYPE_SMC,

        .funcs = &smu_ip_funcs,

int smu_load_microcode(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->load_microcode)
                ret = smu->ppt_funcs->load_microcode(smu);

        mutex_unlock(&smu->mutex);

int smu_check_fw_status(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->check_fw_status)
                ret = smu->ppt_funcs->check_fw_status(smu);

        mutex_unlock(&smu->mutex);

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_gfx_cgpg)
                ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

        mutex_unlock(&smu->mutex);

int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_fan_speed_rpm)
                ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);

        mutex_unlock(&smu->mutex);

int smu_get_power_limit(struct smu_context *smu,

        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_power_limit)
                ret = smu->ppt_funcs->get_power_limit(smu, limit, def);

        mutex_unlock(&smu->mutex);

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_power_limit)
                ret = smu->ppt_funcs->set_power_limit(smu, limit);

        mutex_unlock(&smu->mutex);

int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->print_clk_levels)
                ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

        mutex_unlock(&smu->mutex);

int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_od_percentage)
                ret = smu->ppt_funcs->get_od_percentage(smu, type);

        mutex_unlock(&smu->mutex);

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_od_percentage)
                ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

        mutex_unlock(&smu->mutex);

int smu_od_edit_dpm_table(struct smu_context *smu,
                          enum PP_OD_DPM_TABLE_COMMAND type,
                          long *input, uint32_t size)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->od_edit_dpm_table)
                ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

        mutex_unlock(&smu->mutex);

int smu_read_sensor(struct smu_context *smu,
                    enum amd_pp_sensors sensor,
                    void *data, uint32_t *size)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->read_sensor)
                ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);

        mutex_unlock(&smu->mutex);

int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_power_profile_mode)
                ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

        mutex_unlock(&smu->mutex);

int smu_set_power_profile_mode(struct smu_context *smu,
                               uint32_t param_size,

        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_power_profile_mode)
                ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

        mutex_unlock(&smu->mutex);

int smu_get_fan_control_mode(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_fan_control_mode)
                ret = smu->ppt_funcs->get_fan_control_mode(smu);

        mutex_unlock(&smu->mutex);

int smu_set_fan_control_mode(struct smu_context *smu, int value)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_fan_control_mode)
                ret = smu->ppt_funcs->set_fan_control_mode(smu, value);

        mutex_unlock(&smu->mutex);

int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_fan_speed_percent)
                ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);

        mutex_unlock(&smu->mutex);

int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_fan_speed_percent)
                ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);

        mutex_unlock(&smu->mutex);

int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_fan_speed_rpm)
                ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

        mutex_unlock(&smu->mutex);

int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_deep_sleep_dcefclk)
                ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);

        mutex_unlock(&smu->mutex);

int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        if (smu->ppt_funcs->set_active_display_count)
                ret = smu->ppt_funcs->set_active_display_count(smu, count);

int smu_get_clock_by_type(struct smu_context *smu,
                          enum amd_pp_clock_type type,
                          struct amd_pp_clocks *clocks)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_clock_by_type)
                ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);

        mutex_unlock(&smu->mutex);

int smu_get_max_high_clocks(struct smu_context *smu,
                            struct amd_pp_simple_clock_info *clocks)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_max_high_clocks)
                ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);

        mutex_unlock(&smu->mutex);

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
                                       enum smu_clk_type clk_type,
                                       struct pp_clock_levels_with_latency *clocks)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_clock_by_type_with_latency)
                ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

        mutex_unlock(&smu->mutex);

int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
                                       enum amd_pp_clock_type type,
                                       struct pp_clock_levels_with_voltage *clocks)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_clock_by_type_with_voltage)
                ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

        mutex_unlock(&smu->mutex);

int smu_display_clock_voltage_request(struct smu_context *smu,
                                      struct pp_display_clock_request *clock_req)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->display_clock_voltage_request)
                ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

        mutex_unlock(&smu->mutex);

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->display_disable_memory_clock_switch)
                ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

        mutex_unlock(&smu->mutex);

int smu_notify_smu_enable_pwe(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->notify_smu_enable_pwe)
                ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);

        mutex_unlock(&smu->mutex);

int smu_set_xgmi_pstate(struct smu_context *smu,

        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_xgmi_pstate)
                ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

        mutex_unlock(&smu->mutex);

int smu_set_azalia_d3_pme(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_azalia_d3_pme)
                ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

        mutex_unlock(&smu->mutex);

/*
 * On system suspend or reset, the dpm_enabled flag is cleared, so that
 * those SMU services which are not supported get gated.
 *
 * However, the baco/mode1 reset should still be granted as they are
 * still supported and necessary.
 */
bool smu_baco_is_support(struct smu_context *smu)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
                ret = smu->ppt_funcs->baco_is_support(smu);

        mutex_unlock(&smu->mutex);

int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
        if (smu->ppt_funcs->baco_get_state)

        mutex_lock(&smu->mutex);
        *state = smu->ppt_funcs->baco_get_state(smu);
        mutex_unlock(&smu->mutex);

int smu_baco_enter(struct smu_context *smu)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->baco_enter)
                ret = smu->ppt_funcs->baco_enter(smu);

        mutex_unlock(&smu->mutex);

int smu_baco_exit(struct smu_context *smu)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->baco_exit)
                ret = smu->ppt_funcs->baco_exit(smu);

        mutex_unlock(&smu->mutex);

int smu_mode2_reset(struct smu_context *smu)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->mode2_reset)
                ret = smu->ppt_funcs->mode2_reset(smu);

        mutex_unlock(&smu->mutex);

int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
                                         struct pp_smu_nv_clock_table *max_clocks)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
                ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

        mutex_unlock(&smu->mutex);

int smu_get_uclk_dpm_states(struct smu_context *smu,
                            unsigned int *clock_values_in_khz,
                            unsigned int *num_states)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_uclk_dpm_states)
                ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

        mutex_unlock(&smu->mutex);

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
        enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_current_power_state)
                pm_state = smu->ppt_funcs->get_current_power_state(smu);

        mutex_unlock(&smu->mutex);

int smu_get_dpm_clock_table(struct smu_context *smu,
                            struct dpm_clocks *clock_table)
        struct amdgpu_device *adev = smu->adev;

        if (!adev->pm.dpm_enabled)

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_dpm_clock_table)
                ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

        mutex_unlock(&smu->mutex);

uint32_t smu_get_pptable_power_limit(struct smu_context *smu)

        if (smu->ppt_funcs->get_pptable_power_limit)
                ret = smu->ppt_funcs->get_pptable_power_limit(smu);