/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"

#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
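/*
 * Each entry in the name tables below expands via #type / #fea to its bare
 * token string, e.g. __SMU_DUMMY_MAP(TestMessage) yields "TestMessage".
 * The expansion lists themselves come from the SMU type headers.
 */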
static const char *__smu_message_names[] = {
        SMU_MESSAGE_TYPES
};

const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
{
        if (type < 0 || type >= SMU_MSG_MAX_COUNT)
                return "unknown smu message";
        return __smu_message_names[type];
}
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char *__smu_feature_names[] = {
        SMU_FEATURE_MASKS
};

const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
{
        if (feature < 0 || feature >= SMU_FEATURE_COUNT)
                return "unknown smu feature";
        return __smu_feature_names[feature];
}
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
        size_t size = 0;
        int ret = 0, i = 0;
        uint32_t feature_mask[2] = { 0 };
        int32_t feature_index = 0;
        uint32_t count = 0;
        uint32_t sort_feature[SMU_FEATURE_COUNT];
        uint64_t hw_feature_count = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
        if (ret)
                goto failed;

        size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
                       feature_mask[1], feature_mask[0]);

        for (i = 0; i < SMU_FEATURE_COUNT; i++) {
                feature_index = smu_feature_get_index(smu, i);
                if (feature_index < 0)
                        continue;
                sort_feature[feature_index] = i;
                hw_feature_count++;
        }

        for (i = 0; i < hw_feature_count; i++) {
                size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
                                count++,
                                smu_get_feature_name(smu, sort_feature[i]),
                                i,
                                !!smu_feature_is_enabled(smu, sort_feature[i]) ?
                                "enabled" : "disabled");
        }

failed:
        mutex_unlock(&smu->mutex);
        return size;
}
static int smu_feature_update_enable_state(struct smu_context *smu,
                                           uint64_t feature_mask,
                                           bool enabled)
{
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;

        if (enabled) {
                ret = smu_send_smc_msg_with_param(smu,
                                                  SMU_MSG_EnableSmuFeaturesLow,
                                                  lower_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
                ret = smu_send_smc_msg_with_param(smu,
                                                  SMU_MSG_EnableSmuFeaturesHigh,
                                                  upper_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
        } else {
                ret = smu_send_smc_msg_with_param(smu,
                                                  SMU_MSG_DisableSmuFeaturesLow,
                                                  lower_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
                ret = smu_send_smc_msg_with_param(smu,
                                                  SMU_MSG_DisableSmuFeaturesHigh,
                                                  upper_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
        }

        mutex_lock(&feature->mutex);
        if (enabled)
                bitmap_or(feature->enabled, feature->enabled,
                          (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
        else
                bitmap_andnot(feature->enabled, feature->enabled,
                              (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);

        return ret;
}
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
        int ret = 0;
        uint32_t feature_mask[2] = { 0 };
        uint64_t feature_2_enabled = 0;
        uint64_t feature_2_disabled = 0;
        uint64_t feature_enables = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
        if (ret)
                goto out;

        feature_enables = ((uint64_t)feature_mask[1] << 32 |
                           (uint64_t)feature_mask[0]);

        feature_2_enabled = ~feature_enables & new_mask;
        feature_2_disabled = feature_enables & ~new_mask;
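        /*
         * Worked example (hypothetical masks): with feature_enables = 0x0f
         * and new_mask = 0x3c,
         *   feature_2_enabled  = ~0x0f & 0x3c  = 0x30  -> bits to switch on
         *   feature_2_disabled =  0x0f & ~0x3c = 0x03  -> bits to switch off
         */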
        if (feature_2_enabled) {
                ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
                if (ret)
                        goto out;
        }

        if (feature_2_disabled) {
                ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
                if (ret)
                        goto out;
        }

out:
        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
        int ret = 0;

        if (!if_version && !smu_version)
                return -EINVAL;

        if (smu->smc_fw_if_version && smu->smc_fw_version) {
                if (if_version)
                        *if_version = smu->smc_fw_if_version;

                if (smu_version)
                        *smu_version = smu->smc_fw_version;

                return 0;
        }
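        /*
         * Not cached yet: query the firmware below and cache the results,
         * so that later calls avoid the extra SMC messaging.
         */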
        if (if_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
                if (ret)
                        return ret;

                smu->smc_fw_if_version = *if_version;
        }

        if (smu_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
                if (ret)
                        return ret;

                smu->smc_fw_version = *smu_version;
        }

        return ret;
}
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t min, uint32_t max, bool lock_needed)
{
        int ret = 0;

        if (!smu_clk_dpm_is_enabled(smu, clk_type))
                return 0;

        if (lock_needed)
                mutex_lock(&smu->mutex);

        ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);

        if (lock_needed)
                mutex_unlock(&smu->mutex);

        return ret;
}
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t min, uint32_t max)
{
        int ret = 0, clk_id = 0;
        uint32_t param;

        if (min <= 0 && max <= 0)
                return -EINVAL;

        if (!smu_clk_dpm_is_enabled(smu, clk_type))
                return 0;

        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0)
                return clk_id;

        if (max > 0) {
                param = (uint32_t)((clk_id << 16) | (max & 0xffff));
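                /*
                 * The message argument packs the clock ID into the upper
                 * 16 bits and the frequency in MHz into the lower 16: e.g.
                 * clk_id 2 with a 1200 MHz (0x4b0) max packs as 0x000204b0.
                 */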
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
                                                  param, NULL);
                if (ret)
                        return ret;
        }

        if (min > 0) {
                param = (uint32_t)((clk_id << 16) | (min & 0xffff));
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
                                                  param, NULL);
                if (ret)
                        return ret;
        }

        return ret;
}
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                           uint32_t *min, uint32_t *max, bool lock_needed)
{
        uint32_t clock_limit;
        int ret = 0;

        if (lock_needed)
                mutex_lock(&smu->mutex);

        if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
                switch (clk_type) {
                case SMU_MCLK:
                case SMU_UCLK:
                        clock_limit = smu->smu_table.boot_values.uclk;
                        break;
                case SMU_GFXCLK:
                case SMU_SCLK:
                        clock_limit = smu->smu_table.boot_values.gfxclk;
                        break;
                case SMU_SOCCLK:
                        clock_limit = smu->smu_table.boot_values.socclk;
                        break;
                default:
                        clock_limit = 0;
                        break;
                }

                /* boot values are in units of 10 kHz; report MHz */
                *min = clock_limit / 100;
                *max = clock_limit / 100;
        } else {
                /*
                 * TODO: Have each ASIC (ASIC_ppt funcs) control the callbacks
                 * exposed to the core driver, with helpers for the
                 * functionality common to the SMU_v11_x and SMU_v12_x funcs.
                 */
                ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
        }

        if (lock_needed)
                mutex_unlock(&smu->mutex);

        return ret;
}
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
                              uint16_t level, uint32_t *value)
{
        int ret = 0, clk_id = 0;
        uint32_t param;

        if (!smu_clk_dpm_is_enabled(smu, clk_type))
                return 0;

        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0)
                return clk_id;

        param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
                                          param, value);
        if (ret)
                return ret;

        /*
         * BIT31: 0 - fine-grained DPM, 1 - discrete DPM.
         * Reporting that distinction is not supported for now, so mask
         * the flag off and return only the frequency.
         */
        *value = *value & 0x7fffffff;
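        /*
         * Illustrative raw value: 0x80000dac would decode as 3500 MHz
         * (0xdac) reported with the discrete-DPM flag set in BIT31.
         */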
        return ret;
}

int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t *value)
{
        /* level index 0xff asks the firmware for the DPM level count */
        return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t *min_value, uint32_t *max_value)
{
        int ret = 0;
        uint32_t level_count = 0;

        if (!min_value && !max_value)
                return -EINVAL;

        /* by default, use the level 0 clock value as the min */
        ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
        if (ret)
                return ret;

        ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
        if (ret)
                return ret;

        ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);

        return ret;
}
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
        enum smu_feature_mask feature_id = 0;

        switch (clk_type) {
        case SMU_MCLK:
        case SMU_UCLK:
                feature_id = SMU_FEATURE_DPM_UCLK_BIT;
                break;
        case SMU_GFXCLK:
        case SMU_SCLK:
                feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
                break;
        case SMU_SOCCLK:
                feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
                break;
        default:
                return true;
        }

        if (!smu_feature_is_enabled(smu, feature_id))
                return false;

        return true;
}
/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API takes no smu->mutex lock protection because:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    and the caller guarantees the calls are race-condition free.
 * 2. Or it is called on a user request to change
 *    power_dpm_force_performance_level, in which case the smu->mutex lock
 *    is already held by the parent API, smu_force_performance_level,
 *    in the call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                           bool gate)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
                ret = smu_dpm_set_uvd_enable(smu, !gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s UVD!\n",
                                gate ? "gate" : "ungate");
                break;
        case AMD_IP_BLOCK_TYPE_GFX:
                ret = smu_gfx_off_control(smu, gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
                                gate ? "enable" : "disable");
                break;
        case AMD_IP_BLOCK_TYPE_SDMA:
                ret = smu_powergate_sdma(smu, gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
                                gate ? "gate" : "ungate");
                break;
        case AMD_IP_BLOCK_TYPE_JPEG:
                ret = smu_dpm_set_jpeg_enable(smu, !gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
                                gate ? "gate" : "ungate");
                break;
        default:
                dev_err(smu->adev->dev, "Unsupported block type!\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}
int smu_get_power_num_states(struct smu_context *smu,
                             struct pp_states_info *state_info)
{
        if (!state_info)
                return -EINVAL;

        /* power states are not supported; expose only the default state */
        memset(state_info, 0, sizeof(struct pp_states_info));
        state_info->nums = 1;
        state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

        return 0;
}
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        if (!data || !size)
                return -EINVAL;

        switch (sensor) {
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
                *((uint32_t *)data) = smu->pstate_sclk;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
                *((uint32_t *)data) = smu->pstate_mclk;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
                ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
                *size = 8;
                break;
        case AMDGPU_PP_SENSOR_UVD_POWER:
                *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VCE_POWER:
                *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
                *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
                *size = 4;
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        return ret;
}
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
                     void *table_data, bool drv2smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct amdgpu_device *adev = smu->adev;
        struct smu_table *table = &smu_table->driver_table;
        int table_id = smu_table_get_index(smu, table_index);
        uint32_t table_size;
        int ret = 0;

        if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
                return -EINVAL;

        table_size = smu_table->tables[table_index].size;

        if (drv2smu) {
                memcpy(table->cpu_addr, table_data, table_size);
                /*
                 * Flush the HDP cache so that the table contents the GPU
                 * sees are consistent with what the CPU just wrote.
                 */
                amdgpu_asic_flush_hdp(adev, NULL);
        }
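        /*
         * drv2smu selects the transfer direction: true pushes the driver's
         * copy up to the SMU, false pulls the SMU's copy back into
         * table_data below.
         */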
        ret = smu_send_smc_msg_with_param(smu, drv2smu ?
                                          SMU_MSG_TransferTableDram2Smu :
                                          SMU_MSG_TransferTableSmu2Dram,
                                          table_id | ((argument & 0xFFFF) << 16),
                                          NULL);
        if (ret)
                return ret;

        if (!drv2smu) {
                amdgpu_asic_flush_hdp(adev, NULL);
                memcpy(table_data, table->cpu_addr, table_size);
        }

        return ret;
}
bool is_support_sw_smu(struct amdgpu_device *adev)
{
        if (adev->asic_type >= CHIP_ARCTURUS)
                return true;

        return false;
}
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        uint32_t powerplay_table_size;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        if (smu_table->hardcode_pptable)
                *table = smu_table->hardcode_pptable;
        else
                *table = smu_table->power_play_table;

        powerplay_table_size = smu_table->power_play_table_size;

        mutex_unlock(&smu->mutex);

        return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (header->usStructureSize != size) {
                dev_err(smu->adev->dev, "pp table size does not match!\n");
                return -EIO;
        }

        mutex_lock(&smu->mutex);
        if (!smu_table->hardcode_pptable)
                smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
        if (!smu_table->hardcode_pptable) {
                ret = -ENOMEM;
                goto failed;
        }

        memcpy(smu_table->hardcode_pptable, buf, size);
        smu_table->power_play_table = smu_table->hardcode_pptable;
        smu_table->power_play_table_size = size;

        /*
         * A special hw_fini action (for Navi1x, the DPM features
         * disablement is skipped) may be needed for custom pptable uploading.
         */
        smu->uploading_custom_pp_table = true;

        ret = smu_reset(smu);
        if (ret)
                dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

        smu->uploading_custom_pp_table = false;

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}
static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
        uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

        mutex_lock(&feature->mutex);
        bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);

        ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
                                           SMU_FEATURE_MAX/32);
        if (ret)
                return ret;

        mutex_lock(&feature->mutex);
        bitmap_or(feature->allowed, feature->allowed,
                  (unsigned long *)allowed_feature_mask,
                  feature->feature_num);
        mutex_unlock(&feature->mutex);

        return ret;
}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        int feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);
        if (feature_id < 0)
                return 0;

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->enabled);
        mutex_unlock(&feature->mutex);

        return ret;
}
int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
                            bool enable)
{
        struct smu_feature *feature = &smu->smu_feature;
        int feature_id;

        feature_id = smu_feature_get_index(smu, mask);
        if (feature_id < 0)
                return -EINVAL;

        WARN_ON(feature_id > feature->feature_num);

        return smu_feature_update_enable_state(smu,
                                               1ULL << feature_id,
                                               enable);
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        int feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);
        if (feature_id < 0)
                return 0;

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->supported);
        mutex_unlock(&feature->mutex);

        return ret;
}
static int smu_set_funcs(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                smu->od_enabled = true;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                navi10_set_ppt_funcs(smu);
                break;
        case CHIP_ARCTURUS:
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
                arcturus_set_ppt_funcs(smu);
                /* OD is not supported on Arcturus */
                smu->od_enabled = false;
                break;
        case CHIP_SIENNA_CICHLID:
                sienna_cichlid_set_ppt_funcs(smu);
                break;
        case CHIP_RENOIR:
                renoir_set_ppt_funcs(smu);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
static int smu_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        smu->adev = adev;
        smu->pm_enabled = !!amdgpu_dpm;

        mutex_init(&smu->mutex);

        return smu_set_funcs(adev);
}
static int smu_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret = 0;

        if (!smu->pm_enabled)
                return 0;

        ret = smu_set_default_od_settings(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup default OD settings!\n");
                return ret;
        }

        /*
         * Set initialized values (retrieved from vbios) into the dpm tables
         * context, such as gfxclk, memclk, dcefclk, etc., and enable the
         * DPM feature for each clock domain.
         */
        ret = smu_populate_smc_tables(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
                return ret;
        }

        ret = smu_init_max_sustainable_clocks(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
                return ret;
        }

        ret = smu_populate_umd_state_clk(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
                return ret;
        }

        ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
        if (ret) {
                dev_err(adev->dev, "Failed to get default power limit!\n");
                return ret;
        }

        smu_get_unique_id(smu);

        smu_handle_task(&adev->smu,
                        smu->smu_dpm.dpm_level,
                        AMD_PP_TASK_COMPLETE_INIT,
                        false);

        return 0;
}
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
                            uint16_t *size, uint8_t *frev, uint8_t *crev,
                            uint8_t **addr)
{
        struct amdgpu_device *adev = smu->adev;
        uint16_t data_start;

        if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
                                           size, frev, crev, &data_start))
                return -EINVAL;

        *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

        return 0;
}
static int smu_init_fb_allocations(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        struct smu_table *driver_table = &(smu_table->driver_table);
        uint32_t max_table_size = 0;
        int ret, i;

        /* VRAM allocation for the tool table */
        if (tables[SMU_TABLE_PMSTATUSLOG].size) {
                ret = amdgpu_bo_create_kernel(adev,
                                              tables[SMU_TABLE_PMSTATUSLOG].size,
                                              tables[SMU_TABLE_PMSTATUSLOG].align,
                                              tables[SMU_TABLE_PMSTATUSLOG].domain,
                                              &tables[SMU_TABLE_PMSTATUSLOG].bo,
                                              &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                              &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
                if (ret) {
                        dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
                        return ret;
                }
        }

        /* VRAM allocation for the driver table */
        for (i = 0; i < SMU_TABLE_COUNT; i++) {
                if (tables[i].size == 0)
                        continue;

                if (i == SMU_TABLE_PMSTATUSLOG)
                        continue;

                if (max_table_size < tables[i].size)
                        max_table_size = tables[i].size;
        }

        driver_table->size = max_table_size;
        driver_table->align = PAGE_SIZE;
        driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

        ret = amdgpu_bo_create_kernel(adev,
                                      driver_table->size,
                                      driver_table->align,
                                      driver_table->domain,
                                      &driver_table->bo,
                                      &driver_table->mc_address,
                                      &driver_table->cpu_addr);
        if (ret) {
                dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
                if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
                        amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
                                              &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                              &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
        }

        return ret;
}
static int smu_fini_fb_allocations(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        struct smu_table *driver_table = &(smu_table->driver_table);

        if (!tables)
                return 0;

        if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
                amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
                                      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

        amdgpu_bo_free_kernel(&driver_table->bo,
                              &driver_table->mc_address,
                              &driver_table->cpu_addr);

        return 0;
}
/**
 * smu_alloc_memory_pool - allocate a memory pool in system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is reserved for SMC use; the SetSystemVirtualDramAddr
 * and DramLogSetDramAddr messages notify the SMC of its (changed) address.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        uint64_t pool_size = smu->pool_size;
        int ret = 0;

        if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
                return ret;

        memory_pool->size = pool_size;
        memory_pool->align = PAGE_SIZE;
        memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

        switch (pool_size) {
        case SMU_MEMORY_POOL_SIZE_256_MB:
        case SMU_MEMORY_POOL_SIZE_512_MB:
        case SMU_MEMORY_POOL_SIZE_1_GB:
        case SMU_MEMORY_POOL_SIZE_2_GB:
                ret = amdgpu_bo_create_kernel(adev,
                                              memory_pool->size,
                                              memory_pool->align,
                                              memory_pool->domain,
                                              &memory_pool->bo,
                                              &memory_pool->mc_address,
                                              &memory_pool->cpu_addr);
                if (ret)
                        dev_err(adev->dev, "GTT allocation for dram log memory pool failed!\n");
                break;
        default:
                break;
        }

        return ret;
}
static int smu_free_memory_pool(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;

        if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
                return 0;

        amdgpu_bo_free_kernel(&memory_pool->bo,
                              &memory_pool->mc_address,
                              &memory_pool->cpu_addr);

        memset(memory_pool, 0, sizeof(struct smu_table));

        return 0;
}
static int smu_smc_table_sw_init(struct smu_context *smu)
{
        int ret;

        /*
         * Create the smu_table structure, and init smc tables such as
         * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
         */
        ret = smu_init_smc_tables(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to init smc tables!\n");
                return ret;
        }

        /*
         * Create the smu_power_context structure, and allocate the
         * smu_dpm_context and context storage needed to fill the
         * smu_power_context data.
         */
        ret = smu_init_power(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to init smu power context!\n");
                return ret;
        }

        /*
         * Allocate vram bos to store smc table contents.
         */
        ret = smu_init_fb_allocations(smu);
        if (ret)
                return ret;

        ret = smu_alloc_memory_pool(smu);
        if (ret)
                return ret;

        return 0;
}
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
        int ret;

        ret = smu_free_memory_pool(smu);
        if (ret)
                return ret;

        ret = smu_fini_fb_allocations(smu);
        if (ret)
                return ret;

        ret = smu_fini_power(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to fini smu power context!\n");
                return ret;
        }

        ret = smu_fini_smc_tables(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
                return ret;
        }

        return 0;
}
static void smu_throttling_logging_work_fn(struct work_struct *work)
{
        struct smu_context *smu = container_of(work, struct smu_context,
                                               throttling_logging_work);

        smu_log_thermal_throttling(smu);
}
static int smu_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        smu->pool_size = adev->pm.smu_prv_buffer_size;
        smu->smu_feature.feature_num = SMU_FEATURE_MAX;
        mutex_init(&smu->smu_feature.mutex);
        bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

        mutex_init(&smu->smu_baco.mutex);
        smu->smu_baco.state = SMU_BACO_STATE_EXIT;
        smu->smu_baco.platform_support = false;

        mutex_init(&smu->sensor_lock);
        mutex_init(&smu->metrics_lock);
        mutex_init(&smu->message_lock);

        INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
        smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
        smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
        smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
        smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

        /* assign the priorities before deriving the default mask from them */
        smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
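        /*
         * workload_mask is indexed by these priorities: e.g. with COMPUTE
         * active in addition to the default profile, the mask would read
         * 0b100001 (bit 5 | bit 0).
         */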
        smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
        smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
        smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
        smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
        smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
        smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
        smu->display_config = &adev->pm.pm_display_cfg;

        smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

        ret = smu_init_microcode(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to load smu firmware!\n");
                return ret;
        }

        ret = smu_smc_table_sw_init(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to sw init smc table!\n");
                return ret;
        }

        ret = smu_register_irq_handler(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to register smc irq handler!\n");
                return ret;
        }

        return 0;
}
static int smu_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        ret = smu_smc_table_sw_fini(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to sw fini smc table!\n");
                return ret;
        }

        smu_fini_microcode(smu);

        return 0;
}
static int smu_smc_hw_setup(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret;

        if (smu_is_dpm_running(smu) && adev->in_suspend) {
                dev_info(adev->dev, "dpm has been enabled\n");
                return 0;
        }

        ret = smu_init_display_count(smu, 0);
        if (ret) {
                dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
                return ret;
        }

        ret = smu_set_driver_table_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
                return ret;
        }

        /*
         * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
         * message for tools.
         */
        ret = smu_set_tool_table_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
                return ret;
        }

        /*
         * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
         * to notify the SMC of the memory pool location.
         */
        ret = smu_notify_memory_pool_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
                return ret;
        }

        /* smu_dump_pptable(smu); */
        /*
         * Copy the pptable bo in vram to the smc with SMU messages such as
         * SetDriverDramAddr and TransferTableDram2Smu.
         */
        ret = smu_write_pptable(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
                return ret;
        }

        /* issue Run*Btc msg */
        ret = smu_run_btc(smu);
        if (ret)
                return ret;

        ret = smu_feature_set_allowed_mask(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
                return ret;
        }

        ret = smu_system_features_control(smu, true);
        if (ret) {
                dev_err(adev->dev, "Failed to enable requested dpm features!\n");
                return ret;
        }

        if (!smu_is_dpm_running(smu))
                dev_info(adev->dev, "dpm has been disabled\n");

        ret = smu_override_pcie_parameters(smu);
        if (ret)
                return ret;

        ret = smu_enable_thermal_alert(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to enable thermal alert!\n");
                return ret;
        }

        ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
        if (ret)
                return ret;

        ret = smu_disable_umc_cdr_12gbps_workaround(smu);
        if (ret) {
                dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
                return ret;
        }

        /*
         * For Navi1x, manually switch it to AC mode as the PMFW
         * may boot it with DC mode.
         */
        ret = smu_set_power_source(smu,
                                   adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
                                   SMU_POWER_SOURCE_DC);
        if (ret) {
                dev_err(adev->dev, "Failed to switch to %s mode!\n",
                        adev->pm.ac_power ? "AC" : "DC");
                return ret;
        }

        ret = smu_notify_display_change(smu);
        if (ret)
                return ret;

        /*
         * Set the min deep sleep dce fclk with the bootup value from vbios
         * via the SetMinDeepSleepDcefclk message.
         */
        ret = smu_set_min_dcef_deep_sleep(smu);

        return ret;
}
static int smu_start_smc_engine(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                if (adev->asic_type < CHIP_NAVI10) {
                        if (smu->ppt_funcs->load_microcode) {
                                ret = smu->ppt_funcs->load_microcode(smu);
                                if (ret)
                                        return ret;
                        }
                }
        }

        if (smu->ppt_funcs->check_fw_status) {
                ret = smu->ppt_funcs->check_fw_status(smu);
                if (ret) {
                        dev_err(adev->dev, "SMC is not ready\n");
                        return ret;
                }
        }

        /*
         * Send the GetDriverIfVersion message to check whether the returned
         * value matches the DRIVER_IF_VERSION in the smc header.
         */
        ret = smu_check_fw_version(smu);

        return ret;
}
static int smu_hw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        ret = smu_start_smc_engine(smu);
        if (ret) {
                dev_err(adev->dev, "SMC engine is not correctly up!\n");
                return ret;
        }

        if (smu->is_apu) {
                smu_powergate_sdma(&adev->smu, false);
                smu_powergate_vcn(&adev->smu, false);
                smu_powergate_jpeg(&adev->smu, false);
                smu_set_gfx_cgpg(&adev->smu, true);
        }

        if (!smu->pm_enabled)
                return 0;

        /* get boot_values from vbios to set revision, gfxclk, etc. */
        ret = smu_get_vbios_bootup_values(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
                return ret;
        }

        ret = smu_setup_pptable(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup pptable!\n");
                return ret;
        }

        ret = smu_get_driver_allowed_feature_mask(smu);
        if (ret)
                return ret;

        ret = smu_smc_hw_setup(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup smc hw!\n");
                return ret;
        }

        adev->pm.dpm_enabled = true;

        dev_info(adev->dev, "SMU is initialized successfully!\n");

        return 0;
}
static int smu_disable_dpms(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint64_t features_to_disable;
        int ret = 0;
        bool use_baco = !smu->is_apu &&
                ((adev->in_gpu_reset &&
                  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
                 ((adev->in_runpm || adev->in_hibernate) &&
                  amdgpu_asic_supports_baco(adev)));

        /*
         * For custom pptable uploading, skip the DPM features
         * disable process on Navi1x ASICs.
         *   - The gfx related features are under control of
         *     RLC on those ASICs. RLC reinitialization would be
         *     needed to reenable them, which costs much more effort.
         *   - The SMU firmware can handle the DPM reenablement
         *     properly.
         */
        if (smu->uploading_custom_pp_table &&
            (adev->asic_type >= CHIP_NAVI10) &&
            (adev->asic_type <= CHIP_NAVI12))
                return 0;

        /*
         * For Sienna_Cichlid, the PMFW handles the features disablement
         * properly on BACO-in. Driver involvement is unnecessary.
         */
        if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
            use_baco)
                return 0;

        /*
         * For gpu reset, runpm and hibernation through BACO,
         * the BACO feature has to be kept enabled.
         */
        if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
                features_to_disable = U64_MAX &
                        ~(1ULL << smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT));
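                /* all feature bits set except BACO's, which stays enabled */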
                ret = smu_feature_update_enable_state(smu,
                                                      features_to_disable,
                                                      false);
                if (ret)
                        dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
        } else {
                ret = smu_system_features_control(smu, false);
                if (ret)
                        dev_err(adev->dev, "Failed to disable smu features.\n");
        }

        if (adev->asic_type >= CHIP_NAVI10 &&
            adev->gfx.rlc.funcs->stop)
                adev->gfx.rlc.funcs->stop(adev);

        return ret;
}
static int smu_smc_hw_cleanup(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);

        cancel_work_sync(&smu->throttling_logging_work);

        ret = smu_disable_thermal_alert(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to disable thermal alert!\n");
                return ret;
        }

        ret = smu_disable_dpms(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to disable dpm features!\n");
                return ret;
        }

        return 0;
}
static int smu_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        if (smu->is_apu) {
                smu_powergate_sdma(&adev->smu, true);
                smu_powergate_vcn(&adev->smu, true);
                smu_powergate_jpeg(&adev->smu, true);
        }

        if (!smu->pm_enabled)
                return 0;

        adev->pm.dpm_enabled = false;

        ret = smu_smc_hw_cleanup(smu);
        if (ret)
                return ret;

        return 0;
}
int smu_reset(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        ret = smu_hw_fini(adev);
        if (ret)
                return ret;

        ret = smu_hw_init(adev);
        if (ret)
                return ret;

        ret = smu_late_init(adev);

        return ret;
}
static int smu_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        if (!smu->pm_enabled)
                return 0;

        adev->pm.dpm_enabled = false;

        ret = smu_smc_hw_cleanup(smu);
        if (ret)
                return ret;

        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

        if (smu->is_apu)
                smu_set_gfx_cgpg(&adev->smu, false);

        return 0;
}
static int smu_resume(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        if (!smu->pm_enabled)
                return 0;

        dev_info(adev->dev, "SMU is resuming...\n");

        ret = smu_start_smc_engine(smu);
        if (ret) {
                dev_err(adev->dev, "SMC engine is not correctly up!\n");
                return ret;
        }

        ret = smu_smc_hw_setup(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup smc hw!\n");
                return ret;
        }

        if (smu->is_apu)
                smu_set_gfx_cgpg(&adev->smu, true);

        smu->disable_uclk_switch = 0;

        adev->pm.dpm_enabled = true;

        dev_info(adev->dev, "SMU is resumed successfully!\n");

        return 0;
}
int smu_display_configuration_change(struct smu_context *smu,
                                     const struct amd_pp_display_configuration *display_config)
{
        int index = 0;
        int num_of_active_display = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!display_config)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_deep_sleep_dcefclk)
                smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
                                display_config->min_dcef_deep_sleep_set_clk / 100);

        for (index = 0; index < display_config->num_path_including_non_display; index++) {
                if (display_config->displays[index].controller_id != 0)
                        num_of_active_display++;
        }

        smu_set_active_display_count(smu, num_of_active_display);

        smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
                           display_config->cpu_cc6_disable,
                           display_config->cpu_pstate_disable,
                           display_config->nb_pstate_switch_disable);

        mutex_unlock(&smu->mutex);

        return 0;
}
static int smu_get_clock_info(struct smu_context *smu,
                              struct smu_clock_info *clk_info,
                              enum smu_perf_level_designation designation)
{
        int ret;
        struct smu_performance_level level = {0};

        if (!clk_info)
                return -EINVAL;

        ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
        if (ret)
                return -EINVAL;

        clk_info->min_mem_clk = level.memory_clock;
        clk_info->min_eng_clk = level.core_clock;
        clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

        ret = smu_get_perf_level(smu, designation, &level);
        if (ret)
                return -EINVAL;

        clk_info->max_mem_clk = level.memory_clock;
        clk_info->max_eng_clk = level.core_clock;
        clk_info->max_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

        return 0;
}
int smu_get_current_clocks(struct smu_context *smu,
                           struct amd_pp_clock_info *clocks)
{
        struct amd_pp_simple_clock_info simple_clocks = {0};
        struct smu_clock_info hw_clocks;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        smu_get_dal_power_level(smu, &simple_clocks);

        if (smu->support_power_containment)
                ret = smu_get_clock_info(smu, &hw_clocks,
                                         PERF_LEVEL_POWER_CONTAINMENT);
        else
                ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

        if (ret) {
                dev_err(smu->adev->dev, "Error in smu_get_clock_info\n");
                goto failed;
        }

        clocks->min_engine_clock = hw_clocks.min_eng_clk;
        clocks->max_engine_clock = hw_clocks.max_eng_clk;
        clocks->min_memory_clock = hw_clocks.min_mem_clk;
        clocks->max_memory_clock = hw_clocks.max_mem_clk;
        clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
        clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
        clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
        clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

        if (simple_clocks.level == 0)
                clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
        else
                clocks->max_clocks_state = simple_clocks.level;

        if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
                clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
                clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
        }

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}
static int smu_set_clockgating_state(void *handle,
                                     enum amd_clockgating_state state)
{
        return 0;
}

static int smu_set_powergating_state(void *handle,
                                     enum amd_powergating_state state)
{
        return 0;
}
static int smu_enable_umd_pstate(void *handle,
                                 enum amd_dpm_forced_level *level)
{
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        struct smu_context *smu = (struct smu_context *)(handle);
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;

        if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
                /* enter umd pstate, save current level, disable gfx cg */
                if (*level & profile_mode_mask) {
                        smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = true;
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_UNGATE);
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg */
                if (!(*level & profile_mode_mask)) {
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = smu_dpm_ctx->saved_dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = false;
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_GATE);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_GATE);
                }
        }

        return 0;
}
int smu_adjust_power_state_dynamic(struct smu_context *smu,
                                   enum amd_dpm_forced_level level,
                                   bool skip_display_settings)
{
        int ret = 0;
        int index = 0;
        long workload;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to change display config!");
                        return ret;
                }
        }

        ret = smu_apply_clocks_adjust_rules(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
                return ret;
        }

        if (!skip_display_settings) {
                ret = smu_notify_smc_display_config(smu);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to notify smc display config!");
                        return ret;
                }
        }

        if (smu_dpm_ctx->dpm_level != level) {
                ret = smu_asic_set_performance_level(smu, level);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to set performance level!");
                        return ret;
                }

                /* update the saved copy */
                smu_dpm_ctx->dpm_level = level;
        }

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];
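                /*
                 * Example: with workload_mask 0b100001 (COMPUTE | default),
                 * fls() returns 6, so index 5 selects the COMPUTE profile
                 * setting.
                 */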
                if (smu->power_profile_mode != workload)
                        smu_set_power_profile_mode(smu, &workload, 0, false);
        }

        return ret;
}
int smu_handle_task(struct smu_context *smu,
                    enum amd_dpm_forced_level level,
                    enum amd_pp_task task_id,
                    bool lock_needed)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (lock_needed)
                mutex_lock(&smu->mutex);

        switch (task_id) {
        case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
                ret = smu_pre_display_config_changed(smu);
                if (ret)
                        goto out;
                ret = smu_set_cpu_power_state(smu);
                if (ret)
                        goto out;
                ret = smu_adjust_power_state_dynamic(smu, level, false);
                break;
        case AMD_PP_TASK_COMPLETE_INIT:
        case AMD_PP_TASK_READJUST_POWER_STATE:
                ret = smu_adjust_power_state_dynamic(smu, level, true);
                break;
        default:
                break;
        }

out:
        if (lock_needed)
                mutex_unlock(&smu->mutex);

        return ret;
}
int smu_switch_power_profile(struct smu_context *smu,
                             enum PP_SMC_POWER_PROFILE type,
                             bool en)
{
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        long workload;
        uint32_t index;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
                return -EINVAL;

        mutex_lock(&smu->mutex);

        if (!en) {
                smu->workload_mask &= ~(1 << smu->workload_prority[type]);
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];
        } else {
                smu->workload_mask |= (1 << smu->workload_prority[type]);
                index = fls(smu->workload_mask);
                index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];
        }
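        /*
         * Dropping a profile's bit falls back to the highest-priority
         * profile still set; adding a bit wins immediately if it carries
         * the highest priority of those active.
         */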
        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
                smu_set_power_profile_mode(smu, &workload, 0, false);

        mutex_unlock(&smu->mutex);

        return 0;
}
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        enum amd_dpm_forced_level level;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;

        mutex_lock(&(smu->mutex));
        level = smu_dpm_ctx->dpm_level;
        mutex_unlock(&(smu->mutex));

        return level;
}
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        ret = smu_enable_umd_pstate(smu, &level);
        if (ret) {
                mutex_unlock(&smu->mutex);
                return ret;
        }

        ret = smu_handle_task(smu, level,
                              AMD_PP_TASK_READJUST_POWER_STATE,
                              false);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);
        ret = smu_init_display_count(smu, count);
        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_force_clk_levels(struct smu_context *smu,
                         enum smu_clk_type clk_type,
                         uint32_t mask)
{
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
                return -EINVAL;
        }

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
                ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

        mutex_unlock(&smu->mutex);

        return ret;
}
/*
 * On system suspend or reset, the dpm_enabled flag will be cleared,
 * so the SMU services that are no longer supported get gated off.
 * However, the mp1 state setting should still be granted even with
 * dpm_enabled cleared.
 */
int smu_set_mp1_state(struct smu_context *smu,
                      enum pp_mp1_state mp1_state)
{
        uint16_t msg;
        int ret;

        if (!smu->pm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        switch (mp1_state) {
        case PP_MP1_STATE_SHUTDOWN:
                msg = SMU_MSG_PrepareMp1ForShutdown;
                break;
        case PP_MP1_STATE_UNLOAD:
                msg = SMU_MSG_PrepareMp1ForUnload;
                break;
        case PP_MP1_STATE_RESET:
                msg = SMU_MSG_PrepareMp1ForReset;
                break;
        case PP_MP1_STATE_NONE:
        default:
                mutex_unlock(&smu->mutex);
                return 0;
        }

        /* some asics may not support those messages */
        if (smu_msg_get_index(smu, msg) < 0) {
                mutex_unlock(&smu->mutex);
                return 0;
        }

        ret = smu_send_smc_msg(smu, msg, NULL);
        if (ret)
                dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_set_df_cstate(struct smu_context *smu,
                      enum pp_df_cstate state)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
                return 0;

        mutex_lock(&smu->mutex);

        ret = smu->ppt_funcs->set_df_cstate(smu, state);
        if (ret)
                dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
                return 0;

        mutex_lock(&smu->mutex);

        ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
        if (ret)
                dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_write_watermarks_table(struct smu_context *smu)
{
        void *watermarks_table = smu->smu_table.watermarks_table;

        if (!watermarks_table)
                return -EINVAL;

        return smu_update_table(smu,
                                SMU_TABLE_WATERMARKS,
                                0,
                                watermarks_table,
                                true);
}
int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
                                        struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
        void *table = smu->smu_table.watermarks_table;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!table)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        if (!smu->disable_watermark &&
            smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
            smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                smu_set_watermarks_table(smu, table, clock_ranges);

                if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
                        smu->watermarks_bitmap |= WATERMARKS_EXIST;
                        smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
                }
        }

        mutex_unlock(&smu->mutex);

        return 0;
}
int smu_set_ac_dc(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        /* controlled by firmware */
        if (smu->dc_controlled_by_gpio)
                return 0;

        mutex_lock(&smu->mutex);
        ret = smu_set_power_source(smu,
                                   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
                                   SMU_POWER_SOURCE_DC);
        if (ret)
                dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
                        smu->adev->pm.ac_power ? "AC" : "DC");
        mutex_unlock(&smu->mutex);

        return ret;
}
const struct amd_ip_funcs smu_ip_funcs = {
        .name = "smu",
        .early_init = smu_early_init,
        .late_init = smu_late_init,
        .sw_init = smu_sw_init,
        .sw_fini = smu_sw_fini,
        .hw_init = smu_hw_init,
        .hw_fini = smu_hw_fini,
        .suspend = smu_suspend,
        .resume = smu_resume,
        .is_idle = NULL,
        .check_soft_reset = NULL,
        .wait_for_idle = NULL,
        .soft_reset = NULL,
        .set_clockgating_state = smu_set_clockgating_state,
        .set_powergating_state = smu_set_powergating_state,
        .enable_umd_pstate = smu_enable_umd_pstate,
};
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 11,
        .minor = 0,
        .rev = 0,
        .funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 12,
        .minor = 0,
        .rev = 0,
        .funcs = &smu_ip_funcs,
};
int smu_load_microcode(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->load_microcode)
                ret = smu->ppt_funcs->load_microcode(smu);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_check_fw_status(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->check_fw_status)
                ret = smu->ppt_funcs->check_fw_status(smu);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
        int ret = 0;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_gfx_cgpg)
                ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_fan_speed_rpm)
                ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_power_limit(struct smu_context *smu,
                        uint32_t *limit,
                        bool def,
                        bool lock_needed)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (lock_needed)
                mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_power_limit)
                ret = smu->ppt_funcs->get_power_limit(smu, limit, def);

        if (lock_needed)
                mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_power_limit)
                ret = smu->ppt_funcs->set_power_limit(smu, limit);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->print_clk_levels)
                ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_od_percentage)
                ret = smu->ppt_funcs->get_od_percentage(smu, type);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_od_percentage)
                ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_od_edit_dpm_table(struct smu_context *smu,
                          enum PP_OD_DPM_TABLE_COMMAND type,
                          long *input, uint32_t size)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->od_edit_dpm_table)
                ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_read_sensor(struct smu_context *smu,
                    enum amd_pp_sensors sensor,
                    void *data, uint32_t *size)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->read_sensor)
                ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_power_profile_mode)
                ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_set_power_profile_mode(struct smu_context *smu,
                               long *param,
                               uint32_t param_size,
                               bool lock_needed)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (lock_needed)
                mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_power_profile_mode)
                ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

        if (lock_needed)
                mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_fan_control_mode(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_fan_control_mode)
                ret = smu->ppt_funcs->get_fan_control_mode(smu);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_fan_control_mode)
                ret = smu->ppt_funcs->set_fan_control_mode(smu, value);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_fan_speed_percent)
                ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_fan_speed_percent)
                ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_fan_speed_rpm)
                ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_deep_sleep_dcefclk)
                ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (smu->ppt_funcs->set_active_display_count)
                ret = smu->ppt_funcs->set_active_display_count(smu, count);

        return ret;
}
int smu_get_clock_by_type(struct smu_context *smu,
                          enum amd_pp_clock_type type,
                          struct amd_pp_clocks *clocks)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_clock_by_type)
                ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_max_high_clocks(struct smu_context *smu,
                            struct amd_pp_simple_clock_info *clocks)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_max_high_clocks)
                ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
                                       enum smu_clk_type clk_type,
                                       struct pp_clock_levels_with_latency *clocks)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_clock_by_type_with_latency)
                ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
                                       enum amd_pp_clock_type type,
                                       struct pp_clock_levels_with_voltage *clocks)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_clock_by_type_with_voltage)
                ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_display_clock_voltage_request(struct smu_context *smu,
                                      struct pp_display_clock_request *clock_req)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->display_clock_voltage_request)
                ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->display_disable_memory_clock_switch)
                ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

        mutex_unlock(&smu->mutex);

        return ret;
}
int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->notify_smu_enable_pwe)
                ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_xgmi_pstate(struct smu_context *smu,
                        uint32_t pstate)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_xgmi_pstate)
                ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

        mutex_unlock(&smu->mutex);

        if (ret)
                dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

        return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_azalia_d3_pme)
                ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

        mutex_unlock(&smu->mutex);

        return ret;
}
/*
 * On system suspend or reset, the dpm_enabled flag will be cleared,
 * so the SMU services that are no longer supported get gated off.
 *
 * However, the baco/mode1 reset should still be granted, as these are
 * still supported and necessary.
 */
bool smu_baco_is_support(struct smu_context *smu)
{
        bool ret = false;

        if (!smu->pm_enabled)
                return false;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
                ret = smu->ppt_funcs->baco_is_support(smu);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
        if (!smu->ppt_funcs->baco_get_state)
                return -EINVAL;

        mutex_lock(&smu->mutex);
        *state = smu->ppt_funcs->baco_get_state(smu);
        mutex_unlock(&smu->mutex);

        return 0;
}
int smu_baco_enter(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->baco_enter)
                ret = smu->ppt_funcs->baco_enter(smu);

        mutex_unlock(&smu->mutex);

        if (ret)
                dev_err(smu->adev->dev, "Failed to enter BACO state!\n");

        return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->baco_exit)
                ret = smu->ppt_funcs->baco_exit(smu);

        mutex_unlock(&smu->mutex);

        if (ret)
                dev_err(smu->adev->dev, "Failed to exit BACO state!\n");

        return ret;
}

int smu_mode2_reset(struct smu_context *smu)
{
        int ret = 0;

        if (!smu->pm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->mode2_reset)
                ret = smu->ppt_funcs->mode2_reset(smu);

        mutex_unlock(&smu->mutex);

        if (ret)
                dev_err(smu->adev->dev, "Mode2 reset failed!\n");

        return ret;
}
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
                                         struct pp_smu_nv_clock_table *max_clocks)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
                ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_uclk_dpm_states(struct smu_context *smu,
                            unsigned int *clock_values_in_khz,
                            unsigned int *num_states)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_uclk_dpm_states)
                ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

        mutex_unlock(&smu->mutex);

        return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
        enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_current_power_state)
                pm_state = smu->ppt_funcs->get_current_power_state(smu);

        mutex_unlock(&smu->mutex);

        return pm_state;
}
int smu_get_dpm_clock_table(struct smu_context *smu,
                            struct dpm_clocks *clock_table)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_dpm_clock_table)
                ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

        mutex_unlock(&smu->mutex);

        return ret;
}

uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
{
        uint32_t ret = 0;

        if (smu->ppt_funcs->get_pptable_power_limit)
                ret = smu->ppt_funcs->get_pptable_power_limit(smu);

        return ret;
}

int smu_powergate_vcn(struct smu_context *smu, bool gate)
{
        if (!smu->is_apu)
                return 0;

        return smu_dpm_set_uvd_enable(smu, !gate);
}

int smu_powergate_jpeg(struct smu_context *smu, bool gate)
{
        if (!smu->is_apu)
                return 0;

        return smu_dpm_set_jpeg_enable(smu, !gate);
}