drm/amd/pm: correct the workload setting
Author:     Kenneth Feng <kenneth.feng@amd.com>
AuthorDate: Wed, 30 Oct 2024 05:22:44 +0000 (13:22 +0800)
Commit:     Alex Deucher <alexander.deucher@amd.com>
CommitDate: Mon, 4 Nov 2024 17:51:01 +0000 (12:51 -0500)
Correct the workload setting so that the driver's setting does not get
mixed up with the end user's. Update the workload mask accordingly.

v2: changes as below:
1. the end user cannot erase a workload from the driver, except for the default workload.
2. always show the real highest-priority workload to the end user.
3. the real workload mask is the combination of the driver workload mask and the end user workload mask (see the sketch below).

v3: apply this to the other ASICs as well.
v4: simplify the code
v5: refine the code based on the review comments.

Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 8cc438be5d49b8326b2fcade0bdb7e6a97df9e0b)
Cc: stable@vger.kernel.org # 6.11.x
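
The mask handling above can be summarized outside the driver as follows. The
program below is a standalone, illustrative sketch, not kernel code: it reuses
the field names introduced by the patch (driver_workload_mask, the user mask
kept in user_dpm_profile, workload_priority, workload_setting), but the
WORKLOAD_POLICY_MAX value, the profile numbering and the fls_sketch() helper
are placeholders chosen for illustration; the kernel uses fls() and the real
PP_SMC_POWER_PROFILE_* enums instead.

/*
 * Standalone sketch of the mask handling in this patch: the driver and the
 * end user each own a workload bit mask, the mask sent via
 * SMU_MSG_SetWorkloadMask is their union, and the profile reported back is
 * the highest-priority bit set in that union, as smu_cmn_assign_power_profile()
 * does with fls(). Sizes and profile values below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define WORKLOAD_POLICY_MAX 7   /* illustrative only */

enum {
	PROFILE_BOOTUP_DEFAULT = 0,
	PROFILE_FULLSCREEN3D   = 1,
	PROFILE_COMPUTE        = 5,
};

/* Minimal stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_sketch(uint32_t mask)
{
	int i = 0;

	while (mask) {
		mask >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	/* priority per profile and the reverse map, mirroring smu_sw_init() */
	uint32_t workload_priority[WORKLOAD_POLICY_MAX] = { 0, 1, 2, 3, 4, 5, 6 };
	uint32_t workload_setting[WORKLOAD_POLICY_MAX] = {
		PROFILE_BOOTUP_DEFAULT, PROFILE_FULLSCREEN3D, 2, 3, 4, PROFILE_COMPUTE, 6
	};

	/* driver defaults to fullscreen 3D; the end user later requests compute */
	uint32_t driver_workload_mask = 1u << workload_priority[PROFILE_FULLSCREEN3D];
	uint32_t user_workload_mask   = 1u << workload_priority[PROFILE_COMPUTE];

	/* the mask handed to the SMU is always the union of the two */
	uint32_t workload_mask = driver_workload_mask | user_workload_mask;

	/* report the highest-priority profile contained in the union */
	int index = fls_sketch(workload_mask);

	index = (index > 0 && index <= WORKLOAD_POLICY_MAX) ? index - 1 : 0;

	printf("workload_mask=0x%x, reported profile=%u\n",
	       (unsigned int)workload_mask,
	       (unsigned int)workload_setting[index]);
	return 0;
}

Built with any C compiler, the sketch prints workload_mask=0x22 and reports the
compute profile, which is what smu_cmn_assign_power_profile() would select,
since compute occupies a higher-priority bit than fullscreen 3D.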
12 files changed:
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h

diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 80e60ea2d11e3c6bbfc629edea9161627eec5f83..ee1bcfaae3e3db80e964e916a7d5f430fc08de4c 100644
@@ -1259,26 +1259,33 @@ static int smu_sw_init(void *handle)
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+       smu->user_dpm_profile.user_workload_mask = 0;
 
        atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
        atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
        atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
        atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
 
-       smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
-       smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
-       smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
-       smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
-       smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
-       smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
-       smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+       smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+       smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+       smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+       smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+       smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4;
+       smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+       smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
 
        if (smu->is_apu ||
-           !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
-               smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
-       else
-               smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
+           !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) {
+               smu->driver_workload_mask =
+                       1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+       } else {
+               smu->driver_workload_mask =
+                       1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
+               smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+       }
 
+       smu->workload_mask = smu->driver_workload_mask |
+                                                       smu->user_dpm_profile.user_workload_mask;
        smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
        smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
@@ -2348,17 +2355,20 @@ static int smu_switch_power_profile(void *handle,
                return -EINVAL;
 
        if (!en) {
-               smu->workload_mask &= ~(1 << smu->workload_prority[type]);
+               smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]);
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload[0] = smu->workload_setting[index];
        } else {
-               smu->workload_mask |= (1 << smu->workload_prority[type]);
+               smu->driver_workload_mask |= (1 << smu->workload_priority[type]);
                index = fls(smu->workload_mask);
                index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload[0] = smu->workload_setting[index];
        }
 
+       smu->workload_mask = smu->driver_workload_mask |
+                                                smu->user_dpm_profile.user_workload_mask;
+
        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
                smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
                smu_bump_power_profile_mode(smu, workload, 0);
@@ -3049,12 +3059,23 @@ static int smu_set_power_profile_mode(void *handle,
                                      uint32_t param_size)
 {
        struct smu_context *smu = handle;
+       int ret;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
            !smu->ppt_funcs->set_power_profile_mode)
                return -EOPNOTSUPP;
 
-       return smu_bump_power_profile_mode(smu, param, param_size);
+       if (smu->user_dpm_profile.user_workload_mask &
+          (1 << smu->workload_priority[param[param_size]]))
+          return 0;
+
+       smu->user_dpm_profile.user_workload_mask =
+               (1 << smu->workload_priority[param[param_size]]);
+       smu->workload_mask = smu->user_dpm_profile.user_workload_mask |
+               smu->driver_workload_mask;
+       ret = smu_bump_power_profile_mode(smu, param, param_size);
+
+       return ret;
 }
 
 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b44a185d07e84c868233d36876cb201033881da4..d60d9a12a47ef761b6704c6712a93b396b872218 100644
@@ -240,6 +240,7 @@ struct smu_user_dpm_profile {
        /* user clock state information */
        uint32_t clk_mask[SMU_CLK_COUNT];
        uint32_t clk_dependency;
+       uint32_t user_workload_mask;
 };
 
 #define SMU_TABLE_INIT(tables, table_id, s, a, d)      \
@@ -557,7 +558,8 @@ struct smu_context {
        bool disable_uclk_switch;
 
        uint32_t workload_mask;
-       uint32_t workload_prority[WORKLOAD_POLICY_MAX];
+       uint32_t driver_workload_mask;
+       uint32_t workload_priority[WORKLOAD_POLICY_MAX];
        uint32_t workload_setting[WORKLOAD_POLICY_MAX];
        uint32_t power_profile_mode;
        uint32_t default_power_profile_mode;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index c0f6b59369b7c4e3a93f4fe719bdace5f5f837bf..31fe512028f460421e1425620a63a8b0bd650149 100644
@@ -1455,7 +1455,6 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
                return -EINVAL;
        }
 
-
        if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
             (smu->smc_fw_version >= 0x360d00)) {
                if (size != 10)
@@ -1523,14 +1522,14 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
 
        ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetWorkloadMask,
-                                         1 << workload_type,
+                                         smu->workload_mask,
                                          NULL);
        if (ret) {
                dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
                return ret;
        }
 
-       smu->power_profile_mode = profile_mode;
+       smu_cmn_assign_power_profile(smu);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 16af1a329621f19007a875ac773ce8efafa27eac..12223f507977d0e4944bd32acf5cb8cbb31dd343 100644
@@ -2081,10 +2081,13 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
                                                       smu->power_profile_mode);
        if (workload_type < 0)
                return -EINVAL;
+
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-                                   1 << workload_type, NULL);
+                                   smu->workload_mask, NULL);
        if (ret)
                dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+       else
+               smu_cmn_assign_power_profile(smu);
 
        return ret;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 9c3c48297cba0349c882c439875cb60623e01adc..3b7b2ec8319a874118bb4a0a76a01e64f4dd14e9 100644
@@ -1786,10 +1786,13 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
                                                       smu->power_profile_mode);
        if (workload_type < 0)
                return -EINVAL;
+
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-                                   1 << workload_type, NULL);
+                                   smu->workload_mask, NULL);
        if (ret)
                dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+       else
+               smu_cmn_assign_power_profile(smu);
 
        return ret;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 1fe020f1f4dbe25b763cf36ce8f3ea0b8bb06206..952ee22cbc90e0217c76bb44d30914a7e7a00aa3 100644
@@ -1079,7 +1079,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
        }
 
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
-                                   1 << workload_type,
+                                   smu->workload_mask,
                                    NULL);
        if (ret) {
                dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
@@ -1087,7 +1087,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
                return ret;
        }
 
-       smu->power_profile_mode = profile_mode;
+       smu_cmn_assign_power_profile(smu);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index cc0504b063fa3a2c7454ad5424e4991c569e6507..62316a6707ef2ffb531c50efde1841340b7cd63d 100644
@@ -890,14 +890,14 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
        }
 
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
-                                   1 << workload_type,
+                                   smu->workload_mask,
                                    NULL);
        if (ret) {
                dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
                return ret;
        }
 
-       smu->power_profile_mode = profile_mode;
+       smu_cmn_assign_power_profile(smu);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index d53e162dcd8de2f591fd7add9240519a3731dc09..5dd7ceca64feeda7370013c734166095d88103bb 100644
@@ -2485,7 +2485,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
        DpmActivityMonitorCoeffInt_t *activity_monitor =
                &(activity_monitor_external.DpmActivityMonitorCoeffInt);
        int workload_type, ret = 0;
-       u32 workload_mask, selected_workload_mask;
+       u32 workload_mask;
 
        smu->power_profile_mode = input[size];
 
@@ -2552,7 +2552,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
        if (workload_type < 0)
                return -EINVAL;
 
-       selected_workload_mask = workload_mask = 1 << workload_type;
+       workload_mask = 1 << workload_type;
 
        /* Add optimizations for SMU13.0.0/10.  Reuse the power saving profile */
        if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
@@ -2567,12 +2567,22 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
                        workload_mask |= 1 << workload_type;
        }
 
+       smu->workload_mask |= workload_mask;
        ret = smu_cmn_send_smc_msg_with_param(smu,
                                               SMU_MSG_SetWorkloadMask,
-                                              workload_mask,
+                                              smu->workload_mask,
                                               NULL);
-       if (!ret)
-               smu->workload_mask = selected_workload_mask;
+       if (!ret) {
+               smu_cmn_assign_power_profile(smu);
+               if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) {
+                       workload_type = smu_cmn_to_asic_specific_index(smu,
+                                                              CMN2ASIC_MAPPING_WORKLOAD,
+                                                              PP_SMC_POWER_PROFILE_FULLSCREEN3D);
+                       smu->power_profile_mode = smu->workload_mask & (1 << workload_type)
+                                                                               ? PP_SMC_POWER_PROFILE_FULLSCREEN3D
+                                                                               : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+               }
+       }
 
        return ret;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index b891a5e0a3969a82cd9740a51a27149d65643900..9d0b19419de0fff509ad64497c3deb6993fca1a9 100644
@@ -2499,13 +2499,14 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp
                                                       smu->power_profile_mode);
        if (workload_type < 0)
                return -EINVAL;
+
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-                                   1 << workload_type, NULL);
+                                   smu->workload_mask, NULL);
 
        if (ret)
                dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
        else
-               smu->workload_mask = (1 << workload_type);
+               smu_cmn_assign_power_profile(smu);
 
        return ret;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 1e16a281f2dcde29ffb028740b6ed6a43852f4f6..1aa13d32ceb2cf6f73eaf0a5ad5f4343f64539da 100644
@@ -1807,12 +1807,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
        if (workload_type < 0)
                return -EINVAL;
 
-       ret = smu_cmn_send_smc_msg_with_param(smu,
-                                              SMU_MSG_SetWorkloadMask,
-                                              1 << workload_type,
-                                              NULL);
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+                                                                                 smu->workload_mask, NULL);
+
        if (!ret)
-               smu->workload_mask = 1 << workload_type;
+               smu_cmn_assign_power_profile(smu);
 
        return ret;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 91ad434bcdaeb421734fb486f6a0fbf7dfa65c82..bdfc5e617333df22dc406c99e3859546f46ea64c 100644
@@ -1138,6 +1138,14 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
        return ret;
 }
 
+void smu_cmn_assign_power_profile(struct smu_context *smu)
+{
+       uint32_t index;
+       index = fls(smu->workload_mask);
+       index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+       smu->power_profile_mode = smu->workload_setting[index];
+}
+
 bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
 {
        struct pci_dev *p = NULL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 1de685defe85b19e3d2895ec33a2e95101c8ded8..8a801e389659d1b6f7bc162a93de43761e7739bd 100644
@@ -130,6 +130,8 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 int smu_cmn_set_mp1_state(struct smu_context *smu,
                          enum pp_mp1_state mp1_state);
 
+void smu_cmn_assign_power_profile(struct smu_context *smu);
+
 /*
  * Helper function to make sysfs_emit_at() happy. Align buf to
  * the current page boundary and record the offset.