drm/amd/powerplay: implement power_dpm_force_performance_level for SMU11
author Chengming Gui <Jack.Gui@amd.com>
Fri, 18 Jan 2019 03:27:25 +0000 (11:27 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 19 Mar 2019 20:04:00 +0000 (15:04 -0500)
Add get_performance_level and force_performance_level to implement the
sysfs interface (power_dpm_force_performance_level) for SMU11.
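
For reference, a minimal userspace sketch of exercising
power_dpm_force_performance_level; this is not part of the patch, and the
card0 path is an assumption (the card index depends on the system):

/* Userspace sketch only; DPM_LEVEL_PATH assumes the GPU is card0. */
#include <stdio.h>

#define DPM_LEVEL_PATH \
        "/sys/class/drm/card0/device/power_dpm_force_performance_level"

int main(void)
{
        char level[32] = {0};
        FILE *f;

        /* Read back the current forced level ("auto" by default). */
        f = fopen(DPM_LEVEL_PATH, "r");
        if (!f)
                return 1;
        if (fgets(level, sizeof(level), f))
                printf("current level: %s", level);
        fclose(f);

        /*
         * Request the "high" level; on SMU11 parts this write is routed
         * through smu_force_performance_level() to the vega20 ppt hook.
         */
        f = fopen(DPM_LEVEL_PATH, "w");
        if (!f)
                return 1;
        fputs("high", f);
        fclose(f);

        return 0;
}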

Signed-off-by: Chengming Gui <Jack.Gui@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drivers/gpu/drm/amd/powerplay/vega20_ppt.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 694d85b0f0a0546b542ff7a93a19bbb77af59436..77d946f8fca5bf9e0cd314aba5861801baaea84d 100644
@@ -264,7 +264,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return snprintf(buf, PAGE_SIZE, "off\n");
 
-       if (adev->powerplay.pp_funcs->get_performance_level)
+       if (is_support_sw_smu(adev))
+               level = smu_get_performance_level(&adev->smu);
+       else if (adev->powerplay.pp_funcs->get_performance_level)
                level = amdgpu_dpm_get_performance_level(adev);
        else
                level = adev->pm.dpm.forced_level;
@@ -297,7 +299,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
 
-       if (adev->powerplay.pp_funcs->get_performance_level)
+       if (is_support_sw_smu(adev))
+               current_level = smu_get_performance_level(&adev->smu);
+       else if (adev->powerplay.pp_funcs->get_performance_level)
                current_level = amdgpu_dpm_get_performance_level(adev);
 
        if (strncmp("low", buf, strlen("low")) == 0) {
@@ -326,7 +330,20 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
        if (current_level == level)
                return count;
 
-       if (adev->powerplay.pp_funcs->force_performance_level) {
+       if (is_support_sw_smu(adev)) {
+               mutex_lock(&adev->pm.mutex);
+               if (adev->pm.dpm.thermal_active) {
+                       count = -EINVAL;
+                       mutex_unlock(&adev->pm.mutex);
+                       goto fail;
+               }
+               ret = smu_force_performance_level(&adev->smu, level);
+               if (ret)
+                       count = -EINVAL;
+               else
+                       adev->pm.dpm.forced_level = level;
+               mutex_unlock(&adev->pm.mutex);
+       } else if (adev->powerplay.pp_funcs->force_performance_level) {
                mutex_lock(&adev->pm.mutex);
                if (adev->pm.dpm.thermal_active) {
                        count = -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 83fadcac18e663bad33a032c9bc61f95c4dffd8f..2917411a10eb335817bc55787cf9db5e00c8f5f2 100644
@@ -351,6 +351,8 @@ static int smu_sw_init(void *handle)
        smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
        smu->display_config = &adev->pm.pm_display_cfg;
 
+       smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+       smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        ret = smu_init_microcode(smu);
        if (ret) {
                pr_err("Failed to load smu firmware!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 63cd1ba60dc20d65c47ada3765f6744b3df0976c..111424d0581dfbc36ff2977a6f61c1a2128498e9 100644
@@ -423,6 +423,8 @@ struct pptable_funcs {
                                              *clocks);
        int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
        int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+       enum amd_dpm_forced_level (*get_performance_level)(struct smu_context *smu);
+       int (*force_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
 };
 
 struct smu_funcs
@@ -594,6 +596,10 @@ struct smu_funcs
        ((smu)->funcs->get_power_profile_mode ? (smu)->funcs->get_power_profile_mode((smu), buf) : 0)
 #define smu_set_power_profile_mode(smu, param, param_size) \
        ((smu)->funcs->set_power_profile_mode ? (smu)->funcs->set_power_profile_mode((smu), (param), (param_size)) : 0)
+#define smu_get_performance_level(smu) \
+       ((smu)->ppt_funcs->get_performance_level ? (smu)->ppt_funcs->get_performance_level((smu)) : 0)
+#define smu_force_performance_level(smu, level) \
+       ((smu)->ppt_funcs->force_performance_level ? (smu)->ppt_funcs->force_performance_level((smu), (level)) : 0)
 
 #define smu_msg_get_index(smu, msg) \
        ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 93552d8c39fb4cd67cbfe075f4d63ff160d55b9c..74ad160e1335434402931dcc8f5ee122e374a268 100644
@@ -1100,7 +1100,6 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
        PPCLK_e clk_select = 0;
        uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
 
-       mutex_lock(&smu->mutex);
        if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
                switch (clk_type) {
                case amd_pp_dcef_clock:
@@ -1129,7 +1128,6 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
        }
 
 failed:
-       mutex_unlock(&smu->mutex);
        return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 5de0eabbeb2938b1ef9e45d271dcc3ef4f253089..911296d1f7ccecb9d6a9128ef6c2609e1899d69b 100644
@@ -1744,6 +1744,103 @@ static int vega20_unforce_dpm_levels(struct smu_context *smu)
        return ret;
 }
 
+static enum amd_dpm_forced_level vega20_get_performance_level(struct smu_context *smu)
+{
+       struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       if (!smu_dpm_ctx->dpm_context)
+               return -EINVAL;
+
+       if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
+               mutex_lock(&(smu->mutex));
+               smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
+               mutex_unlock(&(smu->mutex));
+       }
+       return smu_dpm_ctx->dpm_level;
+}
+
+static int
+vega20_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+       int ret = 0;
+       int index = 0;
+       int i = 0;
+       uint32_t sclk_mask, mclk_mask, soc_mask;
+       long workload;
+       struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+       if (!smu_dpm_ctx->dpm_context)
+               return -EINVAL;
+
+       for (i = 0; i < smu->adev->num_ip_blocks; i++) {
+               if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
+                       break;
+       }
+       mutex_lock(&smu->mutex);
+       smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
+       ret = vega20_display_config_changed(smu);
+       if (ret) {
+               pr_err("Failed to change display config!");
+               goto failed;
+       }
+       ret = vega20_apply_clocks_adjust_rules(smu);
+       if (ret) {
+               pr_err("Failed to apply clocks adjust rules!");
+               goto failed;
+       }
+       ret = vega20_notify_smc_dispaly_config(smu);
+       if (ret) {
+               pr_err("Failed to notify smc display config!");
+               goto failed;
+       }
+       switch (level) {
+       case AMD_DPM_FORCED_LEVEL_HIGH:
+               ret = vega20_force_dpm_highest(smu);
+               break;
+
+       case AMD_DPM_FORCED_LEVEL_LOW:
+               ret = vega20_force_dpm_lowest(smu);
+               break;
+
+       case AMD_DPM_FORCED_LEVEL_AUTO:
+               ret = vega20_unforce_dpm_levels(smu);
+               break;
+
+       case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+               ret = vega20_get_profiling_clk_mask(smu, level,
+                                                   &sclk_mask,
+                                                   &mclk_mask,
+                                                   &soc_mask);
+               if (ret)
+                       goto failed;
+               vega20_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
+               vega20_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
+               break;
+
+       case AMD_DPM_FORCED_LEVEL_MANUAL:
+       case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+       default:
+               break;
+       }
+
+       if (!ret)
+               smu_dpm_ctx->dpm_level = level;
+
+       if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+               index = fls(smu->workload_mask);
+               index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+               workload = smu->workload_setting[index];
+
+               if (smu->power_profile_mode != workload)
+                       smu->funcs->set_power_profile_mode(smu, &workload, 0);
+       }
+
+failed:
+       mutex_unlock(&smu->mutex);
+       return ret;
+}
+
 static const struct pptable_funcs vega20_ppt_funcs = {
        .alloc_dpm_context = vega20_allocate_dpm_context,
        .store_powerplay_table = vega20_store_powerplay_table,
@@ -1761,6 +1858,8 @@ static const struct pptable_funcs vega20_ppt_funcs = {
        .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
        .set_default_od8_settings = vega20_set_default_od8_setttings,
        .get_od_percentage = vega20_get_od_percentage,
+       .get_performance_level = vega20_get_performance_level,
+       .force_performance_level = vega20_force_performance_level,
 };
 
 void vega20_set_ppt_funcs(struct smu_context *smu)