/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
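
/*
 * Note: SMU_MESSAGE_TYPES and SMU_FEATURE_MASKS are X-macro lists expanded
 * below with __SMU_DUMMY_MAP redefined to stringify each entry, so a
 * hypothetical list entry Foo becomes the string "Foo". This keeps the name
 * tables in lockstep with the corresponding enums.
 */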
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char *__smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";
	return __smu_message_names[type];
}
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
	size_t size = 0;
	int ret = 0, i = 0;
	uint32_t feature_mask[2] = { 0 };
	int32_t feature_index = 0;
	uint32_t count = 0;
	uint32_t sort_feature[SMU_FEATURE_COUNT];
	uint64_t hw_feature_count = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto failed;

	size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
		       feature_mask[1], feature_mask[0]);
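
	/*
	 * Build a hardware-index -> logical-feature map so the features are
	 * printed in hardware bit order rather than enum order.
	 */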
	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_feature_get_index(smu, i);
		if (feature_index < 0)
			continue;
		sort_feature[feature_index] = i;
		hw_feature_count++;
	}

	for (i = 0; i < hw_feature_count; i++) {
		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
				count++,
				smu_get_feature_name(smu, sort_feature[i]),
				sort_feature[i],
				smu_feature_is_enabled(smu, sort_feature[i]) ?
				"enabled" : "disabled");
	}

failed:
	mutex_unlock(&smu->mutex);

	return size;
}
static int smu_feature_update_enable_state(struct smu_context *smu,
					   uint64_t feature_mask,
					   bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
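
	/*
	 * A message argument is a single dword, so the 64-bit feature mask is
	 * split across a Low and a High message for each direction.
	 */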
	if (enabled) {
		ret = smu_send_smc_msg_with_param(smu,
						  SMU_MSG_EnableSmuFeaturesLow,
						  lower_32_bits(feature_mask),
						  NULL);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu,
						  SMU_MSG_EnableSmuFeaturesHigh,
						  upper_32_bits(feature_mask),
						  NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_send_smc_msg_with_param(smu,
						  SMU_MSG_DisableSmuFeaturesLow,
						  lower_32_bits(feature_mask),
						  NULL);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu,
						  SMU_MSG_DisableSmuFeaturesHigh,
						  upper_32_bits(feature_mask),
						  NULL);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto out;

	feature_enables = ((uint64_t)feature_mask[1] << 32 |
			   (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;
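
	/*
	 * Delta against the current hardware state: bits set in new_mask but
	 * not yet enabled are turned on, bits enabled but cleared in new_mask
	 * are turned off.
	 */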
	if (feature_2_enabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
		if (ret)
			goto out;
	}
	if (feature_2_disabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
		if (ret)
			goto out;
	}

out:
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;
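
	/* Both versions are cached after the first successful query. */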
	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max, bool lock_needed)
{
	int ret = 0;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);
	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;
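
	/*
	 * Message argument layout: clock id in the upper 16 bits, target
	 * frequency in MHz in the lower 16 bits.
	 */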
	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						  param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						  param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max, bool lock_needed)
{
	uint32_t clock_limit;
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* boot values are in units of 10 kHz; convert to MHz */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;
	} else {
		/*
		 * Todo: have each ASIC (ASIC_ppt funcs) control the callbacks
		 * exposed to the core driver, with helpers for functionality
		 * common to SMU_v11_x/SMU_v12_x.
		 */
		ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
	}

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
			      uint16_t level, uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  param, value);
	if (ret)
		return ret;

	/*
	 * BIT31: 0 - fine grained DPM, 1 - discrete DPM.
	 * The flag is not supported for now, so mask it off and return only
	 * the frequency.
	 */
	*value = *value & 0x7fffffff;

	return ret;
}
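
/*
 * Level 0xff is a special index the firmware interprets as "report the
 * number of DPM levels" instead of a particular level's frequency.
 */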
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *value)
{
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *min_value, uint32_t *max_value)
{
	int ret = 0;
	uint32_t level_count = 0;

	if (!min_value && !max_value)
		return -EINVAL;

	if (min_value) {
		/* by default, use the level 0 clock value as the min */
		ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
		if (ret)
			return ret;
	}

	if (max_value) {
		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
		if (ret)
			return ret;

		ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
		if (ret)
			return ret;
	}

	return ret;
}
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}
/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection because:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce), and the
 *    caller guarantees the calls are race free.
 * 2. Or it is called on a user request to set
 *    power_dpm_force_performance_level; in that case the smu->mutex lock is
 *    already enforced by the parent API, smu_force_performance_level, in the
 *    call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, !gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, !gate);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		break;
	default:
		break;
	}

	return ret;
}
int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
		     void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_table_get_index(smu, table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache so the content the GPU sees is
		 * consistent with the CPU's.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}
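
	/*
	 * Single dword message argument: the table id goes in the low 16 bits
	 * and the caller supplied argument (typically a table instance
	 * selector) in the high 16 bits.
	 */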
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id | ((argument & 0xFFFF) << 16),
					  NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_flush_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return ret;
}
bool is_support_sw_smu(struct amdgpu_device *adev)
{
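	/* swSMU covers every ASIC from Arcturus onward in the enum ordering */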
	return adev->asic_type >= CHIP_ARCTURUS;
}
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		pr_err("pp table size does not match!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (on Navi1x, the DPM disablement is
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	if (smu->is_apu)
		return 1;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
			    bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_feature_update_enable_state(smu,
					       1ULL << feature_id,
					       enable);
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_SIENNA_CICHLID:
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}
static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!smu->pm_enabled)
		return 0;

	ret = smu_set_default_od_settings(smu);
	if (ret)
		return ret;

	/*
	 * Set initial values (from vbios), such as gfxclk, memclk and
	 * dcefclk, in the dpm tables context, and enable the DPM feature for
	 * each dpm table.
	 */
	ret = smu_populate_smc_tables(smu);
	if (ret)
		return ret;

	ret = smu_init_max_sustainable_clocks(smu);
	if (ret)
		return ret;

	ret = smu_populate_umd_state_clk(smu);
	if (ret)
		return ret;

	ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
	if (ret)
		return ret;

	smu_get_unique_id(smu);

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	return 0;
}
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			pr_err("VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}
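
	/*
	 * All driver<->SMU table transfers share this single staging bo, so
	 * it is sized to the largest table it will ever carry.
	 */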
	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		pr_err("VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}
static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (!tables)
		return 0;

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}
/**
 * smu_alloc_memory_pool - allocate a memory pool in system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is reserved for SMC use; the SetSystemVirtualDramAddr
 * and DramLogSetDramAddr messages notify the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}
static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and context size to fill the smu_power_context
	 * data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu power!\n");
		return ret;
	}

	/*
	 * Allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}
static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);
	mutex_init(&smu->message_lock);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
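
	/*
	 * workload_prority maps a profile to its priority bit, while
	 * workload_setting maps a priority index back to the profile;
	 * workload_mask tracks the requested priorities, and fls() on it
	 * selects the highest-priority active profile.
	 */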
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		pr_err("Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}
static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret)
		return ret;

	ret = smu_set_driver_table_location(smu);
	if (ret)
		return ret;

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
	 * message for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret)
		return ret;

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMC of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		return ret;

	/* smu_dump_pptable(smu); */
	/*
	 * Copy the pptable bo in vram to the smc with SMU messages such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	ret = smu_override_pcie_parameters(smu);
	if (ret)
		return ret;

	ret = smu_enable_thermal_alert(smu);
	if (ret)
		return ret;

	ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
	if (ret)
		return ret;

	ret = smu_disable_umc_cdr_12gbps_workaround(smu);
	if (ret) {
		pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
		return ret;
	}

	/*
	 * For Navi1X, manually switch it to AC mode as the PMFW
	 * may boot it in DC mode.
	 */
	ret = smu_set_power_source(smu,
				   adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret) {
		pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set the min deep sleep dce fclk to the bootup value from vbios via
	 * the SetMinDeepSleepDcefclk message.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	return ret;
}
static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
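
	/*
	 * When the firmware is not loaded through PSP, pre-Navi10 parts need
	 * the driver to load the SMU microcode itself.
	 */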
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			pr_err("SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send the GetDriverIfVersion message to check that the returned
	 * value matches the DRIVER_IF_VERSION in the smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	ret = smu_start_smc_engine(smu);
	if (ret) {
		pr_err("SMU is not ready yet!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_powergate_vcn(&adev->smu, false);
		smu_powergate_jpeg(&adev->smu, false);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret)
		return ret;

	ret = smu_setup_pptable(smu);
	if (ret)
		return ret;

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret)
		return ret;

	adev->pm.dpm_enabled = true;

	pr_info("SMU is initialized successfully!\n");

	return 0;
}
static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint64_t features_to_disable;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((adev->in_gpu_reset &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization will be
	 *     needed to reenable them. That will cost much more
	 *     effort.
	 *
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table &&
	    (adev->asic_type >= CHIP_NAVI10) &&
	    (adev->asic_type <= CHIP_NAVI12))
		return 0;

	/*
	 * For Sienna_Cichlid, the PMFW handles the feature disablement
	 * properly on BACO entry. Driver involvement is unnecessary.
	 */
	if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
	     use_baco)
		return 0;

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * the BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		features_to_disable = U64_MAX &
			~(1ULL << smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT));
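		/* i.e. turn off every feature except BACO itself */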
		ret = smu_feature_update_enable_state(smu,
						      features_to_disable,
						      0);
		if (ret)
			pr_err("Failed to disable smu features except BACO.\n");
	} else {
		ret = smu_system_features_control(smu, false);
		if (ret)
			pr_err("Failed to disable smu features.\n");
	}

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}
static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);

	cancel_work_sync(&smu->throttling_logging_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		pr_warn("Failed to stop thermal control!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret)
		return ret;

	return 0;
}
static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
		smu_powergate_vcn(&adev->smu, true);
		smu_powergate_jpeg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	return 0;
}
int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);

	return ret;
}
static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}
static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	pr_info("SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		pr_err("SMU is not ready yet!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret)
		return ret;

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	pr_info("SMU is resumed successfully!\n");

	return 0;
}
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
		smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
				display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);

	return ret;
}
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			pr_err("Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);
	}

	return ret;
}
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
		    bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
			     bool en)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0, false);

	mutex_unlock(&smu->mutex);

	return 0;
}
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

	mutex_unlock(&smu->mutex);

	return ret;
}
/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * unsupported SMU services are gated.
 * The mp1 state setting, however, should still be granted even with
 * dpm_enabled cleared.
 */
int smu_set_mp1_state(struct smu_context *smu,
		      enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		mutex_unlock(&smu->mutex);
		return 0;
	}

	/* some asics may not support those messages */
	if (smu_msg_get_index(smu, msg) < 0) {
		mutex_unlock(&smu->mutex);
		return 0;
	}

	ret = smu_send_smc_msg(smu, msg, NULL);
	if (ret)
		pr_err("[PrepareMp1] Failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_df_cstate(struct smu_context *smu,
		      enum pp_df_cstate state)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		pr_err("[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	if (ret)
		pr_err("[AllowXgmiPowerDown] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_update_table(smu,
				SMU_TABLE_WATERMARKS,
				0,
				watermarks_table,
				true);
}
int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
	void *table = smu->smu_table.watermarks_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!table)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!smu->disable_watermark &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		smu_set_watermarks_table(smu, table, clock_ranges);
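
		/*
		 * Mark the table as present but not yet pushed to the SMU;
		 * the actual upload happens later, when the watermarks table
		 * is written out.
		 */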
		if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
			smu->watermarks_bitmap |= WATERMARKS_EXIST;
			smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
		}
	}

	mutex_unlock(&smu->mutex);

	return 0;
}
int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	mutex_lock(&smu->mutex);
	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret)
		pr_err("Failed to switch to %s mode!\n",
		       smu->adev->pm.ac_power ? "AC" : "DC");
	mutex_unlock(&smu->mutex);

	return ret;
}
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};
int smu_load_microcode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->load_microcode)
		ret = smu->ppt_funcs->load_microcode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_check_fw_status(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->check_fw_status)
		ret = smu->ppt_funcs->check_fw_status(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm)
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_limit(struct smu_context *smu,
			uint32_t *limit,
			bool def,
			bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_limit)
		ret = smu->ppt_funcs->get_power_limit(smu, limit, def);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_limit)
		ret = smu->ppt_funcs->set_power_limit(smu, limit);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_profile_mode(struct smu_context *smu,
			       long *param,
			       uint32_t param_size,
			       bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_fan_control_mode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_control_mode)
		ret = smu->ppt_funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_control_mode)
		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_percent)
		ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_percent)
		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm)
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
		ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->set_active_display_count)
		ret = smu->ppt_funcs->set_active_display_count(smu, count);

	return ret;
}
int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type)
		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_high_clocks)
		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->notify_smu_enable_pwe)
		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_xgmi_pstate(struct smu_context *smu,
			uint32_t pstate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * unsupported SMU services are gated.
 *
 * However, the baco/mode1 reset should still be granted as it remains
 * supported and necessary across those transitions.
 */
bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (!smu->ppt_funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}
int smu_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode2_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}

int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}
uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
{
	uint32_t ret = 0;

	if (smu->ppt_funcs->get_pptable_power_limit)
		ret = smu->ppt_funcs->get_pptable_power_limit(smu);

	return ret;
}
int smu_powergate_vcn(struct smu_context *smu, bool gate)
{
	if (!smu->is_apu)
		return 0;

	return smu_dpm_set_uvd_enable(smu, !gate);
}

int smu_powergate_jpeg(struct smu_context *smu, bool gate)
{
	if (!smu->is_apu)
		return 0;

	return smu_dpm_set_jpeg_enable(smu, !gate);
}