drm/amd/pm: Add debugfs info for STB
[linux-2.6-block.git] / drivers / gpu / drm / amd / pm / amdgpu_pm.c
CommitLineData
d38ceaf9 1/*
9ce6aae1
AD
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
d38ceaf9
AD
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Rafał Miłecki <zajec5@gmail.com>
23 * Alex Deucher <alexdeucher@gmail.com>
24 */
fdf2f6c5 25
d38ceaf9
AD
26#include "amdgpu.h"
27#include "amdgpu_drv.h"
28#include "amdgpu_pm.h"
29#include "amdgpu_dpm.h"
30#include "atom.h"
fdf2f6c5 31#include <linux/pci.h>
d38ceaf9
AD
32#include <linux/hwmon.h>
33#include <linux/hwmon-sysfs.h>
ddf74e79 34#include <linux/nospec.h>
b9a9294b 35#include <linux/pm_runtime.h>
517cb957 36#include <asm/processor.h>
8ca606de 37#include "hwmgr.h"
1b5708ff 38
/*
 * Human-readable names for the AMD_CG_SUPPORT_* clockgating feature flags.
 * Used when dumping the enabled clockgating features; the list is
 * terminated by a {0, NULL} sentinel entry.
 */
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},

	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};
75
/*
 * Mapping from hwmon temperature channels to the label strings exposed
 * through the temp<N>_label sysfs attributes.
 */
static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;	/* internal temperature channel id */
	const char *label;		/* user-visible channel name */
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};
84
ca8d40ca
AD
85/**
86 * DOC: power_dpm_state
87 *
dc85db25
AD
88 * The power_dpm_state file is a legacy interface and is only provided for
89 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
90 * certain power related parameters. The file power_dpm_state is used for this.
ca8d40ca 91 * It accepts the following arguments:
dc85db25 92 *
ca8d40ca 93 * - battery
dc85db25 94 *
ca8d40ca 95 * - balanced
dc85db25 96 *
ca8d40ca
AD
97 * - performance
98 *
99 * battery
100 *
101 * On older GPUs, the vbios provided a special power state for battery
102 * operation. Selecting battery switched to this state. This is no
103 * longer provided on newer GPUs so the option does nothing in that case.
104 *
105 * balanced
106 *
107 * On older GPUs, the vbios provided a special power state for balanced
108 * operation. Selecting balanced switched to this state. This is no
109 * longer provided on newer GPUs so the option does nothing in that case.
110 *
111 * performance
112 *
113 * On older GPUs, the vbios provided a special power state for performance
114 * operation. Selecting performance switched to this state. This is no
115 * longer provided on newer GPUs so the option does nothing in that case.
116 *
117 */
118
4e01847c
KW
119static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
120 struct device_attribute *attr,
121 char *buf)
d38ceaf9
AD
122{
123 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 124 struct amdgpu_device *adev = drm_to_adev(ddev);
8dfc8c53 125 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1b5708ff 126 enum amd_pm_state_type pm;
b9a9294b 127 int ret;
1b5708ff 128
53b3f8f4 129 if (amdgpu_in_reset(adev))
48b270bb 130 return -EPERM;
d2ae842d
AD
131 if (adev->in_suspend && !adev->in_runpm)
132 return -EPERM;
48b270bb 133
b9a9294b 134 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
135 if (ret < 0) {
136 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 137 return ret;
66429300 138 }
b9a9294b 139
8dfc8c53 140 if (pp_funcs->get_current_power_state) {
1b5708ff 141 pm = amdgpu_dpm_get_current_power_state(adev);
f0d2a7dc 142 } else {
1b5708ff 143 pm = adev->pm.dpm.user_state;
f0d2a7dc 144 }
d38ceaf9 145
b9a9294b
AD
146 pm_runtime_mark_last_busy(ddev->dev);
147 pm_runtime_put_autosuspend(ddev->dev);
148
a9ca9bb3
TT
149 return sysfs_emit(buf, "%s\n",
150 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
151 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
d38ceaf9
AD
152}
153
4e01847c
KW
154static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
155 struct device_attribute *attr,
156 const char *buf,
157 size_t count)
d38ceaf9
AD
158{
159 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 160 struct amdgpu_device *adev = drm_to_adev(ddev);
1b5708ff 161 enum amd_pm_state_type state;
b9a9294b 162 int ret;
d38ceaf9 163
53b3f8f4 164 if (amdgpu_in_reset(adev))
48b270bb 165 return -EPERM;
d2ae842d
AD
166 if (adev->in_suspend && !adev->in_runpm)
167 return -EPERM;
48b270bb 168
d38ceaf9 169 if (strncmp("battery", buf, strlen("battery")) == 0)
1b5708ff 170 state = POWER_STATE_TYPE_BATTERY;
d38ceaf9 171 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
1b5708ff 172 state = POWER_STATE_TYPE_BALANCED;
d38ceaf9 173 else if (strncmp("performance", buf, strlen("performance")) == 0)
1b5708ff 174 state = POWER_STATE_TYPE_PERFORMANCE;
27414cd4
AD
175 else
176 return -EINVAL;
d38ceaf9 177
b9a9294b 178 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
179 if (ret < 0) {
180 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 181 return ret;
66429300 182 }
b9a9294b 183
f0d2a7dc
EQ
184 if (is_support_sw_smu(adev)) {
185 mutex_lock(&adev->pm.mutex);
186 adev->pm.dpm.user_state = state;
187 mutex_unlock(&adev->pm.mutex);
188 } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
39199b80 189 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
1b5708ff
RZ
190 } else {
191 mutex_lock(&adev->pm.mutex);
192 adev->pm.dpm.user_state = state;
193 mutex_unlock(&adev->pm.mutex);
194
b9a9294b 195 amdgpu_pm_compute_clocks(adev);
1b5708ff 196 }
b9a9294b
AD
197 pm_runtime_mark_last_busy(ddev->dev);
198 pm_runtime_put_autosuspend(ddev->dev);
199
d38ceaf9
AD
200 return count;
201}
202
8567f681
AD
203
204/**
205 * DOC: power_dpm_force_performance_level
206 *
207 * The amdgpu driver provides a sysfs API for adjusting certain power
208 * related parameters. The file power_dpm_force_performance_level is
209 * used for this. It accepts the following arguments:
dc85db25 210 *
8567f681 211 * - auto
dc85db25 212 *
8567f681 213 * - low
dc85db25 214 *
8567f681 215 * - high
dc85db25 216 *
8567f681 217 * - manual
dc85db25 218 *
8567f681 219 * - profile_standard
dc85db25 220 *
8567f681 221 * - profile_min_sclk
dc85db25 222 *
8567f681 223 * - profile_min_mclk
dc85db25 224 *
8567f681
AD
225 * - profile_peak
226 *
227 * auto
228 *
229 * When auto is selected, the driver will attempt to dynamically select
230 * the optimal power profile for current conditions in the driver.
231 *
232 * low
233 *
234 * When low is selected, the clocks are forced to the lowest power state.
235 *
236 * high
237 *
238 * When high is selected, the clocks are forced to the highest power state.
239 *
240 * manual
241 *
242 * When manual is selected, the user can manually adjust which power states
243 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
244 * and pp_dpm_pcie files and adjust the power state transition heuristics
245 * via the pp_power_profile_mode sysfs file.
246 *
247 * profile_standard
248 * profile_min_sclk
249 * profile_min_mclk
250 * profile_peak
251 *
252 * When the profiling modes are selected, clock and power gating are
253 * disabled and the clocks are set for different profiling cases. This
254 * mode is recommended for profiling specific work loads where you do
255 * not want clock or power gating for clock fluctuation to interfere
256 * with your results. profile_standard sets the clocks to a fixed clock
257 * level which varies from asic to asic. profile_min_sclk forces the sclk
258 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
259 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
260 *
261 */
262
4e01847c
KW
263static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
264 struct device_attribute *attr,
265 char *buf)
d38ceaf9
AD
266{
267 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 268 struct amdgpu_device *adev = drm_to_adev(ddev);
cd4d7464 269 enum amd_dpm_forced_level level = 0xff;
b9a9294b 270 int ret;
d38ceaf9 271
53b3f8f4 272 if (amdgpu_in_reset(adev))
48b270bb 273 return -EPERM;
d2ae842d
AD
274 if (adev->in_suspend && !adev->in_runpm)
275 return -EPERM;
48b270bb 276
b9a9294b 277 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
278 if (ret < 0) {
279 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 280 return ret;
66429300 281 }
0c67df48 282
4df144f8 283 if (adev->powerplay.pp_funcs->get_performance_level)
cd4d7464
RZ
284 level = amdgpu_dpm_get_performance_level(adev);
285 else
286 level = adev->pm.dpm.forced_level;
287
b9a9294b
AD
288 pm_runtime_mark_last_busy(ddev->dev);
289 pm_runtime_put_autosuspend(ddev->dev);
290
a9ca9bb3
TT
291 return sysfs_emit(buf, "%s\n",
292 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
293 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
294 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
295 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
296 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
297 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
298 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
299 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
300 (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
301 "unknown");
d38ceaf9
AD
302}
303
/**
 * amdgpu_set_power_dpm_force_performance_level - sysfs store handler for
 * power_dpm_force_performance_level
 *
 * Parses the requested level keyword (prefix match via strncmp), then
 * forces that performance level on the device.  Special cases:
 * - a no-op write (level already current) returns immediately with count;
 * - on Raven (non-Raven2 APUs) gfxoff is disabled when entering manual
 *   mode and re-enabled when leaving it;
 * - "profile_exit" is only accepted while a profile_* mode is active;
 * - the force is refused while a thermal event is active.
 */
static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level;
	int ret = 0;

	/* No access while the GPU is resetting or (non-runtime) suspended. */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* Map the keyword to a forced level; reject anything unrecognized. */
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);
	else
		current_level = adev->pm.dpm.forced_level;

	/* Nothing to do if the requested level is already in effect. */
	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	/*
	 * On Raven (but not Raven2) gfxoff must be off while in manual
	 * mode: disable it on entry to manual, restore it on exit.
	 */
	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	/* profile_exit setting is valid only when current mode is in profile mode */
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		/* Refuse to force a level while a thermal event is active. */
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		} else {
			/* Record the new level only after the force succeeded. */
			adev->pm.dpm.forced_level = level;
		}
		mutex_unlock(&adev->pm.mutex);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
407
f3898ea1
EH
408static ssize_t amdgpu_get_pp_num_states(struct device *dev,
409 struct device_attribute *attr,
410 char *buf)
411{
412 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 413 struct amdgpu_device *adev = drm_to_adev(ddev);
8dfc8c53 414 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
f3898ea1 415 struct pp_states_info data;
09b6744c
DP
416 uint32_t i;
417 int buf_len, ret;
f3898ea1 418
53b3f8f4 419 if (amdgpu_in_reset(adev))
48b270bb 420 return -EPERM;
d2ae842d
AD
421 if (adev->in_suspend && !adev->in_runpm)
422 return -EPERM;
48b270bb 423
b9a9294b 424 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
425 if (ret < 0) {
426 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 427 return ret;
66429300 428 }
b9a9294b 429
8dfc8c53 430 if (pp_funcs->get_pp_num_states) {
f3898ea1 431 amdgpu_dpm_get_pp_num_states(adev, &data);
6f81b2d0 432 } else {
433 memset(&data, 0, sizeof(data));
434 }
f3898ea1 435
b9a9294b
AD
436 pm_runtime_mark_last_busy(ddev->dev);
437 pm_runtime_put_autosuspend(ddev->dev);
438
09b6744c 439 buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
f3898ea1 440 for (i = 0; i < data.nums; i++)
09b6744c 441 buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
f3898ea1
EH
442 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
443 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
444 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
445 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
446
447 return buf_len;
448}
449
450static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
451 struct device_attribute *attr,
452 char *buf)
453{
454 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 455 struct amdgpu_device *adev = drm_to_adev(ddev);
8dfc8c53 456 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2b24c199 457 struct pp_states_info data = {0};
f3898ea1 458 enum amd_pm_state_type pm = 0;
ea2d0bf8 459 int i = 0, ret = 0;
f3898ea1 460
53b3f8f4 461 if (amdgpu_in_reset(adev))
48b270bb 462 return -EPERM;
d2ae842d
AD
463 if (adev->in_suspend && !adev->in_runpm)
464 return -EPERM;
48b270bb 465
b9a9294b 466 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
467 if (ret < 0) {
468 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 469 return ret;
66429300 470 }
b9a9294b 471
8dfc8c53
DP
472 if (pp_funcs->get_current_power_state
473 && pp_funcs->get_pp_num_states) {
f3898ea1
EH
474 pm = amdgpu_dpm_get_current_power_state(adev);
475 amdgpu_dpm_get_pp_num_states(adev, &data);
ea2d0bf8 476 }
f3898ea1 477
b9a9294b
AD
478 pm_runtime_mark_last_busy(ddev->dev);
479 pm_runtime_put_autosuspend(ddev->dev);
480
ea2d0bf8
KW
481 for (i = 0; i < data.nums; i++) {
482 if (pm == data.states[i])
483 break;
f3898ea1
EH
484 }
485
ea2d0bf8
KW
486 if (i == data.nums)
487 i = -EINVAL;
488
a9ca9bb3 489 return sysfs_emit(buf, "%d\n", i);
f3898ea1
EH
490}
491
492static ssize_t amdgpu_get_pp_force_state(struct device *dev,
493 struct device_attribute *attr,
494 char *buf)
495{
496 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 497 struct amdgpu_device *adev = drm_to_adev(ddev);
f3898ea1 498
53b3f8f4 499 if (amdgpu_in_reset(adev))
48b270bb 500 return -EPERM;
d2ae842d
AD
501 if (adev->in_suspend && !adev->in_runpm)
502 return -EPERM;
48b270bb 503
cd4d7464
RZ
504 if (adev->pp_force_state_enabled)
505 return amdgpu_get_pp_cur_state(dev, attr, buf);
506 else
a9ca9bb3 507 return sysfs_emit(buf, "\n");
f3898ea1
EH
508}
509
/**
 * amdgpu_set_pp_force_state - sysfs store handler for pp_force_state
 *
 * A one-character write (just the newline) clears any forced state, as
 * does writing on SW-SMU parts (which do not support forcing).  Otherwise
 * the buffer is parsed as a state index; boot and default states are
 * silently skipped, any other state is dispatched to the backend and the
 * forced-state flag is set.
 */
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
		 adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		/* Harden the user-supplied index against speculation. */
		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		/*
		 * NOTE(review): get_pp_num_states is called before the
		 * pm_runtime_get_sync() below — confirm this callback is
		 * safe without a runtime PM reference.
		 */
		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0) {
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}

		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}
562
d54bb40f
AD
563/**
564 * DOC: pp_table
565 *
566 * The amdgpu driver provides a sysfs API for uploading new powerplay
567 * tables. The file pp_table is used for this. Reading the file
568 * will dump the current power play table. Writing to the file
569 * will attempt to upload a new powerplay table and re-initialize
570 * powerplay using that new table.
571 *
572 */
573
f3898ea1
EH
574static ssize_t amdgpu_get_pp_table(struct device *dev,
575 struct device_attribute *attr,
576 char *buf)
577{
578 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 579 struct amdgpu_device *adev = drm_to_adev(ddev);
f3898ea1 580 char *table = NULL;
b9a9294b 581 int size, ret;
f3898ea1 582
53b3f8f4 583 if (amdgpu_in_reset(adev))
48b270bb 584 return -EPERM;
d2ae842d
AD
585 if (adev->in_suspend && !adev->in_runpm)
586 return -EPERM;
48b270bb 587
b9a9294b 588 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
589 if (ret < 0) {
590 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 591 return ret;
66429300 592 }
b9a9294b 593
8dfc8c53 594 if (adev->powerplay.pp_funcs->get_pp_table) {
f3898ea1 595 size = amdgpu_dpm_get_pp_table(adev, &table);
b9a9294b
AD
596 pm_runtime_mark_last_busy(ddev->dev);
597 pm_runtime_put_autosuspend(ddev->dev);
598 if (size < 0)
599 return size;
600 } else {
601 pm_runtime_mark_last_busy(ddev->dev);
602 pm_runtime_put_autosuspend(ddev->dev);
f3898ea1 603 return 0;
b9a9294b 604 }
f3898ea1
EH
605
606 if (size >= PAGE_SIZE)
607 size = PAGE_SIZE - 1;
608
1684d3ba 609 memcpy(buf, table, size);
f3898ea1
EH
610
611 return size;
612}
613
614static ssize_t amdgpu_set_pp_table(struct device *dev,
615 struct device_attribute *attr,
616 const char *buf,
617 size_t count)
618{
619 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 620 struct amdgpu_device *adev = drm_to_adev(ddev);
289921b0 621 int ret = 0;
f3898ea1 622
53b3f8f4 623 if (amdgpu_in_reset(adev))
48b270bb 624 return -EPERM;
d2ae842d
AD
625 if (adev->in_suspend && !adev->in_runpm)
626 return -EPERM;
48b270bb 627
b9a9294b 628 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
629 if (ret < 0) {
630 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 631 return ret;
66429300 632 }
b9a9294b 633
8f4828d0
DP
634 ret = amdgpu_dpm_set_pp_table(adev, buf, count);
635 if (ret) {
636 pm_runtime_mark_last_busy(ddev->dev);
637 pm_runtime_put_autosuspend(ddev->dev);
638 return ret;
639 }
f3898ea1 640
b9a9294b
AD
641 pm_runtime_mark_last_busy(ddev->dev);
642 pm_runtime_put_autosuspend(ddev->dev);
643
f3898ea1
EH
644 return count;
645}
646
4e418c34
AD
647/**
648 * DOC: pp_od_clk_voltage
649 *
650 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
651 * in each power level within a power state. The pp_od_clk_voltage is used for
652 * this.
653 *
ccda42a4
AD
 654 * Note that the actual memory controller clock rates are exposed, not
655 * the effective memory clock of the DRAMs. To translate it, use the
656 * following formula:
657 *
658 * Clock conversion (Mhz):
659 *
660 * HBM: effective_memory_clock = memory_controller_clock * 1
661 *
662 * G5: effective_memory_clock = memory_controller_clock * 1
663 *
664 * G6: effective_memory_clock = memory_controller_clock * 2
665 *
666 * DRAM data rate (MT/s):
667 *
668 * HBM: effective_memory_clock * 2 = data_rate
669 *
670 * G5: effective_memory_clock * 4 = data_rate
671 *
672 * G6: effective_memory_clock * 8 = data_rate
673 *
674 * Bandwidth (MB/s):
675 *
676 * data_rate * vram_bit_width / 8 = memory_bandwidth
677 *
678 * Some examples:
679 *
680 * G5 on RX460:
681 *
682 * memory_controller_clock = 1750 Mhz
683 *
684 * effective_memory_clock = 1750 Mhz * 1 = 1750 Mhz
685 *
686 * data rate = 1750 * 4 = 7000 MT/s
687 *
688 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
689 *
690 * G6 on RX5700:
691 *
692 * memory_controller_clock = 875 Mhz
693 *
694 * effective_memory_clock = 875 Mhz * 2 = 1750 Mhz
695 *
696 * data rate = 1750 * 8 = 14000 MT/s
697 *
698 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
699 *
d5bf2653
EQ
700 * < For Vega10 and previous ASICs >
701 *
4e418c34 702 * Reading the file will display:
dc85db25 703 *
4e418c34 704 * - a list of engine clock levels and voltages labeled OD_SCLK
dc85db25 705 *
4e418c34 706 * - a list of memory clock levels and voltages labeled OD_MCLK
dc85db25 707 *
4e418c34
AD
708 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
709 *
710 * To manually adjust these settings, first select manual using
711 * power_dpm_force_performance_level. Enter a new value for each
712 * level by writing a string that contains "s/m level clock voltage" to
713 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
714 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
715 * 810 mV. When you have edited all of the states as needed, write
716 * "c" (commit) to the file to commit your changes. If you want to reset to the
717 * default power levels, write "r" (reset) to the file to reset them.
718 *
d5bf2653 719 *
bd09331a 720 * < For Vega20 and newer ASICs >
d5bf2653
EQ
721 *
722 * Reading the file will display:
723 *
724 * - minimum and maximum engine clock labeled OD_SCLK
725 *
37a58f69
EQ
726 * - minimum(not available for Vega20 and Navi1x) and maximum memory
727 * clock labeled OD_MCLK
d5bf2653 728 *
b1f82cb2 729 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
d5bf2653
EQ
730 * They can be used to calibrate the sclk voltage curve.
731 *
a2b6df4f
EQ
732 * - voltage offset(in mV) applied on target voltage calculation.
733 * This is available for Sienna Cichlid, Navy Flounder and Dimgrey
734 * Cavefish. For these ASICs, the target voltage calculation can be
735 * illustrated by "voltage = voltage calculated from v/f curve +
736 * overdrive vddgfx offset"
737 *
d5bf2653
EQ
738 * - a list of valid ranges for sclk, mclk, and voltage curve points
739 * labeled OD_RANGE
740 *
0487bbb4
AD
741 * < For APUs >
742 *
743 * Reading the file will display:
744 *
745 * - minimum and maximum engine clock labeled OD_SCLK
746 *
747 * - a list of valid ranges for sclk labeled OD_RANGE
748 *
3dc8077f
AD
749 * < For VanGogh >
750 *
751 * Reading the file will display:
752 *
753 * - minimum and maximum engine clock labeled OD_SCLK
754 * - minimum and maximum core clocks labeled OD_CCLK
755 *
756 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
757 *
d5bf2653
EQ
758 * To manually adjust these settings:
759 *
760 * - First select manual using power_dpm_force_performance_level
761 *
762 * - For clock frequency setting, enter a new value by writing a
763 * string that contains "s/m index clock" to the file. The index
764 * should be 0 if to set minimum clock. And 1 if to set maximum
765 * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
3dc8077f
AD
766 * "m 1 800" will update maximum mclk to be 800Mhz. For core
767 * clocks on VanGogh, the string contains "p core index clock".
768 * E.g., "p 2 0 800" would set the minimum core clock on core
769 * 2 to 800Mhz.
d5bf2653
EQ
770 *
771 * For sclk voltage curve, enter the new values by writing a
b1f82cb2
EQ
772 * string that contains "vc point clock voltage" to the file. The
773 * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
774 * update point1 with clock set as 300Mhz and voltage as
775 * 600mV. "vc 2 1000 1000" will update point3 with clock set
776 * as 1000Mhz and voltage 1000mV.
d5bf2653 777 *
a2b6df4f
EQ
778 * To update the voltage offset applied for gfxclk/voltage calculation,
779 * enter the new value by writing a string that contains "vo offset".
780 * This is supported by Sienna Cichlid, Navy Flounder and Dimgrey Cavefish.
781 * And the offset can be a positive or negative value.
782 *
d5bf2653
EQ
783 * - When you have edited all of the states as needed, write "c" (commit)
784 * to the file to commit your changes
785 *
786 * - If you want to reset to the default power levels, write "r" (reset)
787 * to the file to reset them
788 *
4e418c34
AD
789 */
790
e3933f26
RZ
791static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
792 struct device_attribute *attr,
793 const char *buf,
794 size_t count)
795{
796 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 797 struct amdgpu_device *adev = drm_to_adev(ddev);
e3933f26
RZ
798 int ret;
799 uint32_t parameter_size = 0;
800 long parameter[64];
801 char buf_cpy[128];
802 char *tmp_str;
803 char *sub_str;
804 const char delimiter[3] = {' ', '\n', '\0'};
805 uint32_t type;
806
53b3f8f4 807 if (amdgpu_in_reset(adev))
48b270bb 808 return -EPERM;
d2ae842d
AD
809 if (adev->in_suspend && !adev->in_runpm)
810 return -EPERM;
48b270bb 811
e3933f26
RZ
812 if (count > 127)
813 return -EINVAL;
814
815 if (*buf == 's')
816 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
0d90d0dd
HR
817 else if (*buf == 'p')
818 type = PP_OD_EDIT_CCLK_VDDC_TABLE;
e3933f26
RZ
819 else if (*buf == 'm')
820 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
821 else if(*buf == 'r')
822 type = PP_OD_RESTORE_DEFAULT_TABLE;
823 else if (*buf == 'c')
824 type = PP_OD_COMMIT_DPM_TABLE;
d5bf2653
EQ
825 else if (!strncmp(buf, "vc", 2))
826 type = PP_OD_EDIT_VDDC_CURVE;
a2b6df4f
EQ
827 else if (!strncmp(buf, "vo", 2))
828 type = PP_OD_EDIT_VDDGFX_OFFSET;
e3933f26
RZ
829 else
830 return -EINVAL;
831
832 memcpy(buf_cpy, buf, count+1);
833
834 tmp_str = buf_cpy;
835
a2b6df4f
EQ
836 if ((type == PP_OD_EDIT_VDDC_CURVE) ||
837 (type == PP_OD_EDIT_VDDGFX_OFFSET))
d5bf2653 838 tmp_str++;
e3933f26
RZ
839 while (isspace(*++tmp_str));
840
ce7c1d04 841 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
aec1d870
MC
842 if (strlen(sub_str) == 0)
843 continue;
e3933f26
RZ
844 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
845 if (ret)
846 return -EINVAL;
847 parameter_size++;
848
849 while (isspace(*tmp_str))
850 tmp_str++;
851 }
852
b9a9294b 853 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
854 if (ret < 0) {
855 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 856 return ret;
66429300 857 }
b9a9294b 858
8f4828d0
DP
859 if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
860 ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
861 parameter,
862 parameter_size);
b9a9294b
AD
863 if (ret) {
864 pm_runtime_mark_last_busy(ddev->dev);
865 pm_runtime_put_autosuspend(ddev->dev);
f1403342 866 return -EINVAL;
b9a9294b 867 }
8f4828d0 868 }
12a6727d 869
8f4828d0
DP
870 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
871 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
872 parameter, parameter_size);
873 if (ret) {
874 pm_runtime_mark_last_busy(ddev->dev);
875 pm_runtime_put_autosuspend(ddev->dev);
876 return -EINVAL;
616ae02f 877 }
8f4828d0 878 }
e388cc47 879
8f4828d0
DP
880 if (type == PP_OD_COMMIT_DPM_TABLE) {
881 if (adev->powerplay.pp_funcs->dispatch_tasks) {
882 amdgpu_dpm_dispatch_task(adev,
883 AMD_PP_TASK_READJUST_POWER_STATE,
884 NULL);
885 pm_runtime_mark_last_busy(ddev->dev);
886 pm_runtime_put_autosuspend(ddev->dev);
887 return count;
888 } else {
889 pm_runtime_mark_last_busy(ddev->dev);
890 pm_runtime_put_autosuspend(ddev->dev);
891 return -EINVAL;
e3933f26
RZ
892 }
893 }
8f4828d0 894
b9a9294b
AD
895 pm_runtime_mark_last_busy(ddev->dev);
896 pm_runtime_put_autosuspend(ddev->dev);
e3933f26 897
f1403342 898 return count;
e3933f26
RZ
899}
900
901static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
902 struct device_attribute *attr,
903 char *buf)
904{
905 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 906 struct amdgpu_device *adev = drm_to_adev(ddev);
b9a9294b
AD
907 ssize_t size;
908 int ret;
e3933f26 909
53b3f8f4 910 if (amdgpu_in_reset(adev))
48b270bb 911 return -EPERM;
d2ae842d
AD
912 if (adev->in_suspend && !adev->in_runpm)
913 return -EPERM;
48b270bb 914
b9a9294b 915 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
916 if (ret < 0) {
917 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 918 return ret;
66429300 919 }
b9a9294b 920
8f4828d0 921 if (adev->powerplay.pp_funcs->print_clock_levels) {
e3933f26
RZ
922 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
923 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
d5bf2653 924 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
8f4828d0 925 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
a3c991f9 926 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
8f4828d0 927 size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
e3933f26 928 } else {
09b6744c 929 size = sysfs_emit(buf, "\n");
e3933f26 930 }
b9a9294b
AD
931 pm_runtime_mark_last_busy(ddev->dev);
932 pm_runtime_put_autosuspend(ddev->dev);
e3933f26 933
b9a9294b 934 return size;
e3933f26
RZ
935}
936
7ca881a8 937/**
98eb03bb 938 * DOC: pp_features
7ca881a8
EQ
939 *
940 * The amdgpu driver provides a sysfs API for adjusting what powerplay
98eb03bb 941 * features to be enabled. The file pp_features is used for this. And
7ca881a8
EQ
942 * this is only available for Vega10 and later dGPUs.
943 *
944 * Reading back the file will show you the following:
945 * - Current ppfeature masks
946 * - List of the all supported powerplay features with their naming,
947 * bitmasks and enablement status('Y'/'N' means "enabled"/"disabled").
948 *
949 * To manually enable or disable a specific feature, just set or clear
950 * the corresponding bit from original ppfeature masks and input the
951 * new ppfeature masks.
952 */
4e01847c
KW
953static ssize_t amdgpu_set_pp_features(struct device *dev,
954 struct device_attribute *attr,
955 const char *buf,
956 size_t count)
7ca881a8
EQ
957{
958 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 959 struct amdgpu_device *adev = drm_to_adev(ddev);
7ca881a8
EQ
960 uint64_t featuremask;
961 int ret;
962
53b3f8f4 963 if (amdgpu_in_reset(adev))
48b270bb 964 return -EPERM;
d2ae842d
AD
965 if (adev->in_suspend && !adev->in_runpm)
966 return -EPERM;
48b270bb 967
7ca881a8
EQ
968 ret = kstrtou64(buf, 0, &featuremask);
969 if (ret)
970 return -EINVAL;
971
b9a9294b 972 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
973 if (ret < 0) {
974 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 975 return ret;
66429300 976 }
b9a9294b 977
c6ce68e6 978 if (adev->powerplay.pp_funcs->set_ppfeature_status) {
7ca881a8 979 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
27414cd4
AD
980 if (ret) {
981 pm_runtime_mark_last_busy(ddev->dev);
982 pm_runtime_put_autosuspend(ddev->dev);
983 return -EINVAL;
984 }
7ca881a8 985 }
b9a9294b
AD
986 pm_runtime_mark_last_busy(ddev->dev);
987 pm_runtime_put_autosuspend(ddev->dev);
7ca881a8
EQ
988
989 return count;
990}
991
4e01847c
KW
992static ssize_t amdgpu_get_pp_features(struct device *dev,
993 struct device_attribute *attr,
994 char *buf)
7ca881a8
EQ
995{
996 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 997 struct amdgpu_device *adev = drm_to_adev(ddev);
b9a9294b
AD
998 ssize_t size;
999 int ret;
7ca881a8 1000
53b3f8f4 1001 if (amdgpu_in_reset(adev))
48b270bb 1002 return -EPERM;
d2ae842d
AD
1003 if (adev->in_suspend && !adev->in_runpm)
1004 return -EPERM;
48b270bb 1005
b9a9294b 1006 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1007 if (ret < 0) {
1008 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1009 return ret;
66429300 1010 }
b9a9294b 1011
8dfc8c53 1012 if (adev->powerplay.pp_funcs->get_ppfeature_status)
b9a9294b
AD
1013 size = amdgpu_dpm_get_ppfeature_status(adev, buf);
1014 else
09b6744c 1015 size = sysfs_emit(buf, "\n");
b9a9294b
AD
1016
1017 pm_runtime_mark_last_busy(ddev->dev);
1018 pm_runtime_put_autosuspend(ddev->dev);
7ca881a8 1019
b9a9294b 1020 return size;
7ca881a8
EQ
1021}
1022
271dc908 1023/**
a667b75c 1024 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
271dc908
AD
1025 *
1026 * The amdgpu driver provides a sysfs API for adjusting what power levels
1027 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
d7e28e2d
EQ
1028 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
1029 * this.
d7337ca2 1030 *
d7e28e2d
EQ
1031 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
1032 * Vega10 and later ASICs.
828e37ef 1033 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
271dc908
AD
1034 *
1035 * Reading back the files will show you the available power levels within
1036 * the power state and the clock information for those levels.
1037 *
1038 * To manually adjust these states, first select manual using
48edde39 1039 * power_dpm_force_performance_level.
a667b75c 1040 * Secondly, enter a new value for each level by inputting a string that
48edde39 1041 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
a667b75c
AD
1042 * E.g.,
1043 *
1044 * .. code-block:: bash
1045 *
1046 * echo "4 5 6" > pp_dpm_sclk
1047 *
1048 * will enable sclk levels 4, 5, and 6.
d7e28e2d
EQ
1049 *
1050 * NOTE: change to the dcefclk max dpm level is not supported now
271dc908
AD
1051 */
1052
2ea092e5
DP
1053static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
1054 enum pp_clock_type type,
f3898ea1
EH
1055 char *buf)
1056{
1057 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1058 struct amdgpu_device *adev = drm_to_adev(ddev);
b9a9294b
AD
1059 ssize_t size;
1060 int ret;
f3898ea1 1061
53b3f8f4 1062 if (amdgpu_in_reset(adev))
48b270bb 1063 return -EPERM;
d2ae842d
AD
1064 if (adev->in_suspend && !adev->in_runpm)
1065 return -EPERM;
48b270bb 1066
b9a9294b 1067 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1068 if (ret < 0) {
1069 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1070 return ret;
66429300 1071 }
b9a9294b 1072
2ea092e5
DP
1073 if (adev->powerplay.pp_funcs->print_clock_levels)
1074 size = amdgpu_dpm_print_clock_levels(adev, type, buf);
cd4d7464 1075 else
09b6744c 1076 size = sysfs_emit(buf, "\n");
b9a9294b
AD
1077
1078 pm_runtime_mark_last_busy(ddev->dev);
1079 pm_runtime_put_autosuspend(ddev->dev);
1080
1081 return size;
f3898ea1
EH
1082}
1083
4b4bd048
KC
1084/*
1085 * Worst case: 32 bits individually specified, in octal at 12 characters
1086 * per line (+1 for \n).
1087 */
1088#define AMDGPU_MASK_BUF_MAX (32 * 13)
1089
1090static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
f3898ea1 1091{
f3898ea1 1092 int ret;
c915ef89 1093 unsigned long level;
48edde39 1094 char *sub_str = NULL;
1095 char *tmp;
4b4bd048 1096 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
48edde39 1097 const char delimiter[3] = {' ', '\n', '\0'};
4b4bd048 1098 size_t bytes;
f3898ea1 1099
4b4bd048
KC
1100 *mask = 0;
1101
1102 bytes = min(count, sizeof(buf_cpy) - 1);
1103 memcpy(buf_cpy, buf, bytes);
1104 buf_cpy[bytes] = '\0';
48edde39 1105 tmp = buf_cpy;
ce7c1d04 1106 while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
48edde39 1107 if (strlen(sub_str)) {
c915ef89
DC
1108 ret = kstrtoul(sub_str, 0, &level);
1109 if (ret || level > 31)
4b4bd048
KC
1110 return -EINVAL;
1111 *mask |= 1 << level;
48edde39 1112 } else
1113 break;
f3898ea1 1114 }
4b4bd048
KC
1115
1116 return 0;
1117}
1118
2ea092e5
DP
1119static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
1120 enum pp_clock_type type,
4b4bd048
KC
1121 const char *buf,
1122 size_t count)
1123{
1124 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1125 struct amdgpu_device *adev = drm_to_adev(ddev);
4b4bd048
KC
1126 int ret;
1127 uint32_t mask = 0;
1128
53b3f8f4 1129 if (amdgpu_in_reset(adev))
48b270bb 1130 return -EPERM;
d2ae842d
AD
1131 if (adev->in_suspend && !adev->in_runpm)
1132 return -EPERM;
48b270bb 1133
4b4bd048
KC
1134 ret = amdgpu_read_mask(buf, count, &mask);
1135 if (ret)
1136 return ret;
1137
b9a9294b 1138 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1139 if (ret < 0) {
1140 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1141 return ret;
66429300 1142 }
b9a9294b 1143
2ea092e5
DP
1144 if (adev->powerplay.pp_funcs->force_clock_level)
1145 ret = amdgpu_dpm_force_clock_level(adev, type, mask);
1146 else
1147 ret = 0;
241dbbb1 1148
b9a9294b
AD
1149 pm_runtime_mark_last_busy(ddev->dev);
1150 pm_runtime_put_autosuspend(ddev->dev);
1151
241dbbb1
EQ
1152 if (ret)
1153 return -EINVAL;
cd4d7464 1154
f3898ea1
EH
1155 return count;
1156}
1157
2ea092e5 1158static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
f3898ea1
EH
1159 struct device_attribute *attr,
1160 char *buf)
1161{
2ea092e5
DP
1162 return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
1163}
b9a9294b 1164
2ea092e5
DP
1165static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
1166 struct device_attribute *attr,
1167 const char *buf,
1168 size_t count)
1169{
1170 return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
1171}
b9a9294b 1172
2ea092e5
DP
1173static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
1174 struct device_attribute *attr,
1175 char *buf)
1176{
1177 return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
f3898ea1
EH
1178}
1179
1180static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
1181 struct device_attribute *attr,
1182 const char *buf,
1183 size_t count)
1184{
2ea092e5 1185 return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
f3898ea1
EH
1186}
1187
d7337ca2
EQ
1188static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
1189 struct device_attribute *attr,
1190 char *buf)
1191{
2ea092e5 1192 return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
d7337ca2
EQ
1193}
1194
1195static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
1196 struct device_attribute *attr,
1197 const char *buf,
1198 size_t count)
1199{
2ea092e5 1200 return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
d7337ca2
EQ
1201}
1202
828e37ef
EQ
1203static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1204 struct device_attribute *attr,
1205 char *buf)
1206{
2ea092e5 1207 return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
828e37ef
EQ
1208}
1209
1210static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1211 struct device_attribute *attr,
1212 const char *buf,
1213 size_t count)
1214{
2ea092e5 1215 return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
828e37ef
EQ
1216}
1217
9577b0ec
XD
1218static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
1219 struct device_attribute *attr,
1220 char *buf)
1221{
2ea092e5 1222 return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
9577b0ec
XD
1223}
1224
1225static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
1226 struct device_attribute *attr,
1227 const char *buf,
1228 size_t count)
1229{
2ea092e5 1230 return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
9577b0ec
XD
1231}
1232
1233static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
1234 struct device_attribute *attr,
1235 char *buf)
1236{
2ea092e5 1237 return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
9577b0ec
XD
1238}
1239
1240static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
1241 struct device_attribute *attr,
1242 const char *buf,
1243 size_t count)
1244{
2ea092e5 1245 return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
9577b0ec
XD
1246}
1247
d7e28e2d
EQ
1248static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1249 struct device_attribute *attr,
1250 char *buf)
1251{
2ea092e5 1252 return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
d7e28e2d
EQ
1253}
1254
1255static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1256 struct device_attribute *attr,
1257 const char *buf,
1258 size_t count)
1259{
2ea092e5 1260 return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
d7e28e2d
EQ
1261}
1262
f3898ea1
EH
1263static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1264 struct device_attribute *attr,
1265 char *buf)
1266{
2ea092e5 1267 return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
f3898ea1
EH
1268}
1269
1270static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1271 struct device_attribute *attr,
1272 const char *buf,
1273 size_t count)
1274{
2ea092e5 1275 return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
f3898ea1
EH
1276}
1277
428bafa8
EH
1278static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1279 struct device_attribute *attr,
1280 char *buf)
1281{
1282 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1283 struct amdgpu_device *adev = drm_to_adev(ddev);
428bafa8 1284 uint32_t value = 0;
b9a9294b 1285 int ret;
428bafa8 1286
53b3f8f4 1287 if (amdgpu_in_reset(adev))
48b270bb 1288 return -EPERM;
d2ae842d
AD
1289 if (adev->in_suspend && !adev->in_runpm)
1290 return -EPERM;
48b270bb 1291
b9a9294b 1292 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1293 if (ret < 0) {
1294 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1295 return ret;
66429300 1296 }
b9a9294b 1297
6d7c8302 1298 if (is_support_sw_smu(adev))
75145aab 1299 value = 0;
6d7c8302 1300 else if (adev->powerplay.pp_funcs->get_sclk_od)
428bafa8
EH
1301 value = amdgpu_dpm_get_sclk_od(adev);
1302
b9a9294b
AD
1303 pm_runtime_mark_last_busy(ddev->dev);
1304 pm_runtime_put_autosuspend(ddev->dev);
1305
a9ca9bb3 1306 return sysfs_emit(buf, "%d\n", value);
428bafa8
EH
1307}
1308
1309static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1310 struct device_attribute *attr,
1311 const char *buf,
1312 size_t count)
1313{
1314 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1315 struct amdgpu_device *adev = drm_to_adev(ddev);
428bafa8
EH
1316 int ret;
1317 long int value;
1318
53b3f8f4 1319 if (amdgpu_in_reset(adev))
48b270bb 1320 return -EPERM;
d2ae842d
AD
1321 if (adev->in_suspend && !adev->in_runpm)
1322 return -EPERM;
48b270bb 1323
428bafa8
EH
1324 ret = kstrtol(buf, 0, &value);
1325
b9a9294b
AD
1326 if (ret)
1327 return -EINVAL;
1328
1329 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1330 if (ret < 0) {
1331 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1332 return ret;
66429300 1333 }
428bafa8 1334
e9c5b46e 1335 if (is_support_sw_smu(adev)) {
75145aab 1336 value = 0;
cd4d7464 1337 } else {
e9c5b46e
LG
1338 if (adev->powerplay.pp_funcs->set_sclk_od)
1339 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1340
1341 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1342 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1343 } else {
1344 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1345 amdgpu_pm_compute_clocks(adev);
1346 }
8b2e574d 1347 }
428bafa8 1348
b9a9294b
AD
1349 pm_runtime_mark_last_busy(ddev->dev);
1350 pm_runtime_put_autosuspend(ddev->dev);
1351
428bafa8
EH
1352 return count;
1353}
1354
f2bdc05f
EH
1355static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1356 struct device_attribute *attr,
1357 char *buf)
1358{
1359 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1360 struct amdgpu_device *adev = drm_to_adev(ddev);
f2bdc05f 1361 uint32_t value = 0;
b9a9294b 1362 int ret;
f2bdc05f 1363
53b3f8f4 1364 if (amdgpu_in_reset(adev))
48b270bb 1365 return -EPERM;
d2ae842d
AD
1366 if (adev->in_suspend && !adev->in_runpm)
1367 return -EPERM;
48b270bb 1368
b9a9294b 1369 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1370 if (ret < 0) {
1371 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1372 return ret;
66429300 1373 }
b9a9294b 1374
6d7c8302 1375 if (is_support_sw_smu(adev))
75145aab 1376 value = 0;
6d7c8302 1377 else if (adev->powerplay.pp_funcs->get_mclk_od)
f2bdc05f 1378 value = amdgpu_dpm_get_mclk_od(adev);
f2bdc05f 1379
b9a9294b
AD
1380 pm_runtime_mark_last_busy(ddev->dev);
1381 pm_runtime_put_autosuspend(ddev->dev);
1382
a9ca9bb3 1383 return sysfs_emit(buf, "%d\n", value);
f2bdc05f
EH
1384}
1385
1386static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1387 struct device_attribute *attr,
1388 const char *buf,
1389 size_t count)
1390{
1391 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1392 struct amdgpu_device *adev = drm_to_adev(ddev);
f2bdc05f
EH
1393 int ret;
1394 long int value;
1395
53b3f8f4 1396 if (amdgpu_in_reset(adev))
48b270bb 1397 return -EPERM;
d2ae842d
AD
1398 if (adev->in_suspend && !adev->in_runpm)
1399 return -EPERM;
48b270bb 1400
f2bdc05f
EH
1401 ret = kstrtol(buf, 0, &value);
1402
b9a9294b
AD
1403 if (ret)
1404 return -EINVAL;
1405
1406 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1407 if (ret < 0) {
1408 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1409 return ret;
66429300 1410 }
f2bdc05f 1411
e9c5b46e 1412 if (is_support_sw_smu(adev)) {
75145aab 1413 value = 0;
cd4d7464 1414 } else {
e9c5b46e
LG
1415 if (adev->powerplay.pp_funcs->set_mclk_od)
1416 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1417
1418 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1419 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1420 } else {
1421 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1422 amdgpu_pm_compute_clocks(adev);
1423 }
f2bdc05f
EH
1424 }
1425
b9a9294b
AD
1426 pm_runtime_mark_last_busy(ddev->dev);
1427 pm_runtime_put_autosuspend(ddev->dev);
1428
f2bdc05f
EH
1429 return count;
1430}
1431
6b2576f5
AD
1432/**
1433 * DOC: pp_power_profile_mode
1434 *
1435 * The amdgpu driver provides a sysfs API for adjusting the heuristics
1436 * related to switching between power levels in a power state. The file
1437 * pp_power_profile_mode is used for this.
1438 *
1439 * Reading this file outputs a list of all of the predefined power profiles
1440 * and the relevant heuristics settings for that profile.
1441 *
1442 * To select a profile or create a custom profile, first select manual using
1443 * power_dpm_force_performance_level. Writing the number of a predefined
1444 * profile to pp_power_profile_mode will enable those heuristics. To
1445 * create a custom set of heuristics, write a string of numbers to the file
1446 * starting with the number of the custom profile along with a setting
1447 * for each heuristic parameter. Due to differences across asic families
1448 * the heuristic parameters vary from family to family.
1449 *
1450 */
1451
37c5c4db
RZ
1452static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1453 struct device_attribute *attr,
1454 char *buf)
1455{
1456 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1457 struct amdgpu_device *adev = drm_to_adev(ddev);
b9a9294b
AD
1458 ssize_t size;
1459 int ret;
37c5c4db 1460
53b3f8f4 1461 if (amdgpu_in_reset(adev))
48b270bb 1462 return -EPERM;
d2ae842d
AD
1463 if (adev->in_suspend && !adev->in_runpm)
1464 return -EPERM;
48b270bb 1465
b9a9294b 1466 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1467 if (ret < 0) {
1468 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1469 return ret;
66429300 1470 }
b9a9294b 1471
2ea092e5 1472 if (adev->powerplay.pp_funcs->get_power_profile_mode)
b9a9294b
AD
1473 size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1474 else
09b6744c 1475 size = sysfs_emit(buf, "\n");
b9a9294b
AD
1476
1477 pm_runtime_mark_last_busy(ddev->dev);
1478 pm_runtime_put_autosuspend(ddev->dev);
37c5c4db 1479
b9a9294b 1480 return size;
37c5c4db
RZ
1481}
1482
1483
1484static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1485 struct device_attribute *attr,
1486 const char *buf,
1487 size_t count)
1488{
7c8e0835 1489 int ret;
37c5c4db 1490 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1491 struct amdgpu_device *adev = drm_to_adev(ddev);
37c5c4db
RZ
1492 uint32_t parameter_size = 0;
1493 long parameter[64];
1494 char *sub_str, buf_cpy[128];
1495 char *tmp_str;
1496 uint32_t i = 0;
1497 char tmp[2];
1498 long int profile_mode = 0;
1499 const char delimiter[3] = {' ', '\n', '\0'};
1500
53b3f8f4 1501 if (amdgpu_in_reset(adev))
48b270bb 1502 return -EPERM;
d2ae842d
AD
1503 if (adev->in_suspend && !adev->in_runpm)
1504 return -EPERM;
48b270bb 1505
37c5c4db
RZ
1506 tmp[0] = *(buf);
1507 tmp[1] = '\0';
1508 ret = kstrtol(tmp, 0, &profile_mode);
1509 if (ret)
b9a9294b 1510 return -EINVAL;
37c5c4db
RZ
1511
1512 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1513 if (count < 2 || count > 127)
1514 return -EINVAL;
1515 while (isspace(*++buf))
1516 i++;
1517 memcpy(buf_cpy, buf, count-i);
1518 tmp_str = buf_cpy;
ce7c1d04 1519 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
c2efbc3f
EQ
1520 if (strlen(sub_str) == 0)
1521 continue;
37c5c4db 1522 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
b9a9294b
AD
1523 if (ret)
1524 return -EINVAL;
37c5c4db
RZ
1525 parameter_size++;
1526 while (isspace(*tmp_str))
1527 tmp_str++;
1528 }
1529 }
1530 parameter[parameter_size] = profile_mode;
b9a9294b
AD
1531
1532 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1533 if (ret < 0) {
1534 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1535 return ret;
66429300 1536 }
b9a9294b 1537
2ea092e5 1538 if (adev->powerplay.pp_funcs->set_power_profile_mode)
37c5c4db 1539 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
b9a9294b
AD
1540
1541 pm_runtime_mark_last_busy(ddev->dev);
1542 pm_runtime_put_autosuspend(ddev->dev);
1543
37c5c4db
RZ
1544 if (!ret)
1545 return count;
b9a9294b 1546
37c5c4db
RZ
1547 return -EINVAL;
1548}
1549
b374d82d 1550/**
f503fe69 1551 * DOC: gpu_busy_percent
b374d82d
TSD
1552 *
1553 * The amdgpu driver provides a sysfs API for reading how busy the GPU
1554 * is as a percentage. The file gpu_busy_percent is used for this.
1555 * The SMU firmware computes a percentage of load based on the
1556 * aggregate activity level in the IP cores.
1557 */
4e01847c
KW
1558static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1559 struct device_attribute *attr,
1560 char *buf)
b374d82d
TSD
1561{
1562 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1563 struct amdgpu_device *adev = drm_to_adev(ddev);
b374d82d
TSD
1564 int r, value, size = sizeof(value);
1565
53b3f8f4 1566 if (amdgpu_in_reset(adev))
48b270bb 1567 return -EPERM;
d2ae842d
AD
1568 if (adev->in_suspend && !adev->in_runpm)
1569 return -EPERM;
48b270bb 1570
b9a9294b 1571 r = pm_runtime_get_sync(ddev->dev);
66429300
AD
1572 if (r < 0) {
1573 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1574 return r;
66429300 1575 }
b9a9294b 1576
b374d82d
TSD
1577 /* read the IP busy sensor */
1578 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
1579 (void *)&value, &size);
4a5a2de6 1580
b9a9294b
AD
1581 pm_runtime_mark_last_busy(ddev->dev);
1582 pm_runtime_put_autosuspend(ddev->dev);
1583
b374d82d
TSD
1584 if (r)
1585 return r;
1586
a9ca9bb3 1587 return sysfs_emit(buf, "%d\n", value);
b374d82d
TSD
1588}
1589
f120386d
EQ
1590/**
1591 * DOC: mem_busy_percent
1592 *
1593 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1594 * is as a percentage. The file mem_busy_percent is used for this.
1595 * The SMU firmware computes a percentage of load based on the
1596 * aggregate activity level in the IP cores.
1597 */
4e01847c
KW
1598static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1599 struct device_attribute *attr,
1600 char *buf)
f120386d
EQ
1601{
1602 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1603 struct amdgpu_device *adev = drm_to_adev(ddev);
f120386d
EQ
1604 int r, value, size = sizeof(value);
1605
53b3f8f4 1606 if (amdgpu_in_reset(adev))
48b270bb 1607 return -EPERM;
d2ae842d
AD
1608 if (adev->in_suspend && !adev->in_runpm)
1609 return -EPERM;
48b270bb 1610
b9a9294b 1611 r = pm_runtime_get_sync(ddev->dev);
66429300
AD
1612 if (r < 0) {
1613 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1614 return r;
66429300 1615 }
b9a9294b 1616
f120386d
EQ
1617 /* read the IP busy sensor */
1618 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
1619 (void *)&value, &size);
1620
b9a9294b
AD
1621 pm_runtime_mark_last_busy(ddev->dev);
1622 pm_runtime_put_autosuspend(ddev->dev);
1623
f120386d
EQ
1624 if (r)
1625 return r;
1626
a9ca9bb3 1627 return sysfs_emit(buf, "%d\n", value);
f120386d
EQ
1628}
1629
b45e18ac
KR
1630/**
1631 * DOC: pcie_bw
1632 *
1633 * The amdgpu driver provides a sysfs API for estimating how much data
1634 * has been received and sent by the GPU in the last second through PCIe.
1635 * The file pcie_bw is used for this.
1636 * The Perf counters count the number of received and sent messages and return
1637 * those values, as well as the maximum payload size of a PCIe packet (mps).
1638 * Note that it is not possible to easily and quickly obtain the size of each
1639 * packet transmitted, so we output the max payload size (mps) to allow for
1640 * quick estimation of the PCIe bandwidth usage
1641 */
1642static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1643 struct device_attribute *attr,
1644 char *buf)
1645{
1646 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1647 struct amdgpu_device *adev = drm_to_adev(ddev);
d08d692e 1648 uint64_t count0 = 0, count1 = 0;
b9a9294b 1649 int ret;
b45e18ac 1650
53b3f8f4 1651 if (amdgpu_in_reset(adev))
48b270bb 1652 return -EPERM;
d2ae842d
AD
1653 if (adev->in_suspend && !adev->in_runpm)
1654 return -EPERM;
48b270bb 1655
d08d692e
AD
1656 if (adev->flags & AMD_IS_APU)
1657 return -ENODATA;
1658
1659 if (!adev->asic_funcs->get_pcie_usage)
1660 return -ENODATA;
1661
b9a9294b 1662 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1663 if (ret < 0) {
1664 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1665 return ret;
66429300 1666 }
b9a9294b 1667
b45e18ac 1668 amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
b9a9294b
AD
1669
1670 pm_runtime_mark_last_busy(ddev->dev);
1671 pm_runtime_put_autosuspend(ddev->dev);
1672
a9ca9bb3
TT
1673 return sysfs_emit(buf, "%llu %llu %i\n",
1674 count0, count1, pcie_get_mps(adev->pdev));
b45e18ac
KR
1675}
1676
fb2dbfd2
KR
1677/**
1678 * DOC: unique_id
1679 *
1680 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU
1681 * The file unique_id is used for this.
1682 * This will provide a Unique ID that will persist from machine to machine
1683 *
1684 * NOTE: This will only work for GFX9 and newer. This file will be absent
1685 * on unsupported ASICs (GFX8 and older)
1686 */
1687static ssize_t amdgpu_get_unique_id(struct device *dev,
1688 struct device_attribute *attr,
1689 char *buf)
1690{
1691 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1692 struct amdgpu_device *adev = drm_to_adev(ddev);
fb2dbfd2 1693
53b3f8f4 1694 if (amdgpu_in_reset(adev))
48b270bb 1695 return -EPERM;
d2ae842d
AD
1696 if (adev->in_suspend && !adev->in_runpm)
1697 return -EPERM;
48b270bb 1698
fb2dbfd2 1699 if (adev->unique_id)
a9ca9bb3 1700 return sysfs_emit(buf, "%016llx\n", adev->unique_id);
fb2dbfd2
KR
1701
1702 return 0;
1703}
1704
b265bdbd
EQ
1705/**
1706 * DOC: thermal_throttling_logging
1707 *
1708 * Thermal throttling pulls down the clock frequency and thus the performance.
1709 * It's a useful mechanism to protect the chip from overheating. Since it
1710 * impacts performance, the user controls whether it is enabled and if so,
1711 * the log frequency.
1712 *
1713 * Reading back the file shows you the status(enabled or disabled) and
1714 * the interval(in seconds) between each thermal logging.
1715 *
1716 * Writing an integer to the file, sets a new logging interval, in seconds.
1717 * The value should be between 1 and 3600. If the value is less than 1,
1718 * thermal logging is disabled. Values greater than 3600 are ignored.
1719 */
1720static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1721 struct device_attribute *attr,
1722 char *buf)
1723{
1724 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1725 struct amdgpu_device *adev = drm_to_adev(ddev);
b265bdbd 1726
a9ca9bb3
TT
1727 return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1728 adev_to_drm(adev)->unique,
1729 atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1730 adev->throttling_logging_rs.interval / HZ + 1);
b265bdbd
EQ
1731}
1732
1733static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1734 struct device_attribute *attr,
1735 const char *buf,
1736 size_t count)
1737{
1738 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1739 struct amdgpu_device *adev = drm_to_adev(ddev);
b265bdbd
EQ
1740 long throttling_logging_interval;
1741 unsigned long flags;
1742 int ret = 0;
1743
1744 ret = kstrtol(buf, 0, &throttling_logging_interval);
1745 if (ret)
1746 return ret;
1747
1748 if (throttling_logging_interval > 3600)
1749 return -EINVAL;
1750
1751 if (throttling_logging_interval > 0) {
1752 raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1753 /*
1754 * Reset the ratelimit timer internals.
1755 * This can effectively restart the timer.
1756 */
1757 adev->throttling_logging_rs.interval =
1758 (throttling_logging_interval - 1) * HZ;
1759 adev->throttling_logging_rs.begin = 0;
1760 adev->throttling_logging_rs.printed = 0;
1761 adev->throttling_logging_rs.missed = 0;
1762 raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1763
1764 atomic_set(&adev->throttling_logging_enabled, 1);
1765 } else {
1766 atomic_set(&adev->throttling_logging_enabled, 0);
1767 }
1768
1769 return count;
1770}
1771
25c933b1
EQ
1772/**
1773 * DOC: gpu_metrics
1774 *
1775 * The amdgpu driver provides a sysfs API for retrieving current gpu
1776 * metrics data. The file gpu_metrics is used for this. Reading the
1777 * file will dump all the current gpu metrics data.
1778 *
1779 * These data include temperature, frequency, engines utilization,
1780 * power consume, throttler status, fan speed and cpu core statistics(
1781 * available for APU only). That's it will give a snapshot of all sensors
1782 * at the same time.
1783 */
1784static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1785 struct device_attribute *attr,
1786 char *buf)
1787{
1788 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1789 struct amdgpu_device *adev = drm_to_adev(ddev);
25c933b1
EQ
1790 void *gpu_metrics;
1791 ssize_t size = 0;
1792 int ret;
1793
53b3f8f4 1794 if (amdgpu_in_reset(adev))
25c933b1 1795 return -EPERM;
d2ae842d
AD
1796 if (adev->in_suspend && !adev->in_runpm)
1797 return -EPERM;
25c933b1
EQ
1798
1799 ret = pm_runtime_get_sync(ddev->dev);
1800 if (ret < 0) {
1801 pm_runtime_put_autosuspend(ddev->dev);
1802 return ret;
1803 }
1804
2ea092e5 1805 if (adev->powerplay.pp_funcs->get_gpu_metrics)
25c933b1 1806 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
25c933b1
EQ
1807
1808 if (size <= 0)
1809 goto out;
1810
1811 if (size >= PAGE_SIZE)
1812 size = PAGE_SIZE - 1;
1813
1814 memcpy(buf, gpu_metrics, size);
1815
1816out:
1817 pm_runtime_mark_last_busy(ddev->dev);
1818 pm_runtime_put_autosuspend(ddev->dev);
1819
1820 return size;
1821}
1822
a7673a1c
S
1823/**
1824 * DOC: smartshift_apu_power
1825 *
1826 * The amdgpu driver provides a sysfs API for reporting APU power
1827 * share if it supports smartshift. The value is expressed as
1828 * the proportion of stapm limit where stapm limit is the total APU
1829 * power limit. The result is in percentage. If APU power is 130% of
1830 * STAPM, then APU is using 30% of the dGPU's headroom.
1831 */
1832
1833static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
1834 char *buf)
1835{
1836 struct drm_device *ddev = dev_get_drvdata(dev);
1837 struct amdgpu_device *adev = drm_to_adev(ddev);
1838 uint32_t ss_power, size;
1839 int r = 0;
1840
1841 if (amdgpu_in_reset(adev))
1842 return -EPERM;
1843 if (adev->in_suspend && !adev->in_runpm)
1844 return -EPERM;
1845
1846 r = pm_runtime_get_sync(ddev->dev);
1847 if (r < 0) {
1848 pm_runtime_put_autosuspend(ddev->dev);
1849 return r;
1850 }
1851
1852 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1853 (void *)&ss_power, &size);
1854 if (r)
1855 goto out;
1856
1857 r = sysfs_emit(buf, "%u%%\n", ss_power);
1858
1859out:
1860 pm_runtime_mark_last_busy(ddev->dev);
1861 pm_runtime_put_autosuspend(ddev->dev);
1862 return r;
1863}
1864
1865/**
1866 * DOC: smartshift_dgpu_power
1867 *
1868 * The amdgpu driver provides a sysfs API for reporting the dGPU power
1869 * share if the device is in HG and supports smartshift. The value
1870 * is expressed as the proportion of stapm limit where stapm limit
1871 * is the total APU power limit. The value is in percentage. If dGPU
1872 * power is 20% higher than STAPM power(120%), it's using 20% of the
1873 * APU's power headroom.
1874 */
1875
1876static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
1877 char *buf)
1878{
1879 struct drm_device *ddev = dev_get_drvdata(dev);
1880 struct amdgpu_device *adev = drm_to_adev(ddev);
1881 uint32_t ss_power, size;
1882 int r = 0;
1883
1884 if (amdgpu_in_reset(adev))
1885 return -EPERM;
1886 if (adev->in_suspend && !adev->in_runpm)
1887 return -EPERM;
1888
1889 r = pm_runtime_get_sync(ddev->dev);
1890 if (r < 0) {
1891 pm_runtime_put_autosuspend(ddev->dev);
1892 return r;
1893 }
1894
1895 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1896 (void *)&ss_power, &size);
1897
1898 if (r)
1899 goto out;
1900
1901 r = sysfs_emit(buf, "%u%%\n", ss_power);
1902
1903out:
1904 pm_runtime_mark_last_busy(ddev->dev);
1905 pm_runtime_put_autosuspend(ddev->dev);
1906 return r;
1907}
1908
30d95a37
S
1909/**
1910 * DOC: smartshift_bias
1911 *
1912 * The amdgpu driver provides a sysfs API for reporting the
1913 * smartshift(SS2.0) bias level. The value ranges from -100 to 100
1914 * and the default is 0. -100 sets maximum preference to APU
1915 * and 100 sets max perference to dGPU.
1916 */
1917
1918static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1919 struct device_attribute *attr,
1920 char *buf)
1921{
1922 int r = 0;
1923
1924 r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1925
1926 return r;
1927}
1928
1929static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1930 struct device_attribute *attr,
1931 const char *buf, size_t count)
1932{
1933 struct drm_device *ddev = dev_get_drvdata(dev);
1934 struct amdgpu_device *adev = drm_to_adev(ddev);
1935 int r = 0;
1936 int bias = 0;
1937
1938 if (amdgpu_in_reset(adev))
1939 return -EPERM;
1940 if (adev->in_suspend && !adev->in_runpm)
1941 return -EPERM;
1942
1943 r = pm_runtime_get_sync(ddev->dev);
1944 if (r < 0) {
1945 pm_runtime_put_autosuspend(ddev->dev);
1946 return r;
1947 }
1948
1949 r = kstrtoint(buf, 10, &bias);
1950 if (r)
1951 goto out;
1952
1953 if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1954 bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1955 else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1956 bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1957
1958 amdgpu_smartshift_bias = bias;
1959 r = count;
1960
1961 /* TODO: upadte bias level with SMU message */
1962
1963out:
1964 pm_runtime_mark_last_busy(ddev->dev);
1965 pm_runtime_put_autosuspend(ddev->dev);
1966 return r;
1967}
1968
1969
a7673a1c
S
1970static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1971 uint32_t mask, enum amdgpu_device_attr_states *states)
1972{
1973 uint32_t ss_power, size;
1974
1975 if (!amdgpu_acpi_is_power_shift_control_supported())
1976 *states = ATTR_STATE_UNSUPPORTED;
1977 else if ((adev->flags & AMD_IS_PX) &&
1978 !amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1979 *states = ATTR_STATE_UNSUPPORTED;
1980 else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1981 (void *)&ss_power, &size))
1982 *states = ATTR_STATE_UNSUPPORTED;
1983 else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1984 (void *)&ss_power, &size))
1985 *states = ATTR_STATE_UNSUPPORTED;
1986
1987 return 0;
1988}
1989
30d95a37
S
1990static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1991 uint32_t mask, enum amdgpu_device_attr_states *states)
1992{
1993 uint32_t ss_power, size;
1994
1995 if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1996 *states = ATTR_STATE_UNSUPPORTED;
1997 else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1998 (void *)&ss_power, &size))
1999 *states = ATTR_STATE_UNSUPPORTED;
2000 else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
2001 (void *)&ss_power, &size))
2002 *states = ATTR_STATE_UNSUPPORTED;
2003
2004 return 0;
2005}
2006
4e01847c
KW
2007static struct amdgpu_device_attr amdgpu_device_attrs[] = {
2008 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
4215a119 2009 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
7884d0e9
JG
2010 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2011 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2012 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2013 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
4e01847c
KW
2014 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2015 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2016 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2017 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
9577b0ec
XD
2018 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2019 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
4e01847c
KW
2020 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
2021 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
2022 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
2023 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
ac82902d 2024 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
4e01847c 2025 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
ac82902d
VC
2026 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2027 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
4e01847c 2028 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
ac82902d
VC
2029 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2030 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2031 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2032 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
a7673a1c
S
2033 AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
2034 .attr_update = ss_power_attr_update),
2035 AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
2036 .attr_update = ss_power_attr_update),
30d95a37
S
2037 AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
2038 .attr_update = ss_bias_attr_update),
4e01847c
KW
2039};
2040
2041static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
ba02fd6b 2042 uint32_t mask, enum amdgpu_device_attr_states *states)
4e01847c
KW
2043{
2044 struct device_attribute *dev_attr = &attr->dev_attr;
2045 const char *attr_name = dev_attr->attr.name;
2046 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2047 enum amd_asic_type asic_type = adev->asic_type;
2048
2049 if (!(attr->flags & mask)) {
ba02fd6b 2050 *states = ATTR_STATE_UNSUPPORTED;
4e01847c
KW
2051 return 0;
2052 }
2053
2054#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
2055
2056 if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
d5c8ffb9 2057 if (asic_type < CHIP_VEGA10)
ba02fd6b 2058 *states = ATTR_STATE_UNSUPPORTED;
4e01847c 2059 } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
0133840f
KR
2060 if (asic_type < CHIP_VEGA10 ||
2061 asic_type == CHIP_ARCTURUS ||
2062 asic_type == CHIP_ALDEBARAN)
ba02fd6b 2063 *states = ATTR_STATE_UNSUPPORTED;
4e01847c
KW
2064 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
2065 if (asic_type < CHIP_VEGA20)
ba02fd6b 2066 *states = ATTR_STATE_UNSUPPORTED;
4e01847c 2067 } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
ba02fd6b 2068 *states = ATTR_STATE_UNSUPPORTED;
4e01847c 2069 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
e017fb66 2070 (is_support_sw_smu(adev) && adev->smu.is_apu) ||
08da4fcd 2071 (!is_support_sw_smu(adev) && hwmgr->od_enabled))
ba02fd6b 2072 *states = ATTR_STATE_SUPPORTED;
4e01847c
KW
2073 } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
2074 if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
ba02fd6b 2075 *states = ATTR_STATE_UNSUPPORTED;
4e01847c
KW
2076 } else if (DEVICE_ATTR_IS(pcie_bw)) {
2077 /* PCIe Perf counters won't work on APU nodes */
2078 if (adev->flags & AMD_IS_APU)
ba02fd6b 2079 *states = ATTR_STATE_UNSUPPORTED;
4e01847c 2080 } else if (DEVICE_ATTR_IS(unique_id)) {
81a16241
KR
2081 if (asic_type != CHIP_VEGA10 &&
2082 asic_type != CHIP_VEGA20 &&
2083 asic_type != CHIP_ARCTURUS)
ba02fd6b 2084 *states = ATTR_STATE_UNSUPPORTED;
4e01847c 2085 } else if (DEVICE_ATTR_IS(pp_features)) {
d5c8ffb9 2086 if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
ba02fd6b 2087 *states = ATTR_STATE_UNSUPPORTED;
25c933b1
EQ
2088 } else if (DEVICE_ATTR_IS(gpu_metrics)) {
2089 if (asic_type < CHIP_VEGA12)
2090 *states = ATTR_STATE_UNSUPPORTED;
9577b0ec 2091 } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
8a4d393e 2092 if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
9577b0ec
XD
2093 *states = ATTR_STATE_UNSUPPORTED;
2094 } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
8a4d393e 2095 if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
9577b0ec 2096 *states = ATTR_STATE_UNSUPPORTED;
a7505591
ML
2097 } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2098 if (!adev->powerplay.pp_funcs->get_power_profile_mode ||
2099 amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2100 *states = ATTR_STATE_UNSUPPORTED;
4e01847c
KW
2101 }
2102
1d0e622f
KW
2103 switch (asic_type) {
2104 case CHIP_ARCTURUS:
2105 case CHIP_ALDEBARAN:
2106 /* the Mi series card does not support standalone mclk/socclk/fclk level setting */
4e01847c
KW
2107 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2108 DEVICE_ATTR_IS(pp_dpm_socclk) ||
2109 DEVICE_ATTR_IS(pp_dpm_fclk)) {
2110 dev_attr->attr.mode &= ~S_IWUGO;
2111 dev_attr->store = NULL;
2112 }
1d0e622f
KW
2113 break;
2114 default:
2115 break;
4e01847c
KW
2116 }
2117
ede14a1b
DP
2118 if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
2119 /* SMU MP1 does not support dcefclk level setting */
2120 if (asic_type >= CHIP_NAVI10) {
2121 dev_attr->attr.mode &= ~S_IWUGO;
2122 dev_attr->store = NULL;
2123 }
2124 }
2125
4e01847c
KW
2126#undef DEVICE_ATTR_IS
2127
2128 return 0;
2129}
2130
2131
2132static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2133 struct amdgpu_device_attr *attr,
ba02fd6b 2134 uint32_t mask, struct list_head *attr_list)
4e01847c
KW
2135{
2136 int ret = 0;
2137 struct device_attribute *dev_attr = &attr->dev_attr;
2138 const char *name = dev_attr->attr.name;
ba02fd6b
KW
2139 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2140 struct amdgpu_device_attr_entry *attr_entry;
2141
4e01847c 2142 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
ba02fd6b 2143 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
4e01847c
KW
2144
2145 BUG_ON(!attr);
2146
8a81028b 2147 attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
4e01847c 2148
ba02fd6b 2149 ret = attr_update(adev, attr, mask, &attr_states);
4e01847c
KW
2150 if (ret) {
2151 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2152 name, ret);
2153 return ret;
2154 }
2155
ba02fd6b 2156 if (attr_states == ATTR_STATE_UNSUPPORTED)
4e01847c
KW
2157 return 0;
2158
2159 ret = device_create_file(adev->dev, dev_attr);
2160 if (ret) {
2161 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2162 name, ret);
2163 }
2164
ba02fd6b
KW
2165 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2166 if (!attr_entry)
2167 return -ENOMEM;
2168
2169 attr_entry->attr = attr;
2170 INIT_LIST_HEAD(&attr_entry->entry);
2171
2172 list_add_tail(&attr_entry->entry, attr_list);
4e01847c
KW
2173
2174 return ret;
2175}
2176
2177static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2178{
2179 struct device_attribute *dev_attr = &attr->dev_attr;
2180
4e01847c 2181 device_remove_file(adev->dev, dev_attr);
4e01847c
KW
2182}
2183
ba02fd6b
KW
2184static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2185 struct list_head *attr_list);
2186
4e01847c
KW
2187static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2188 struct amdgpu_device_attr *attrs,
2189 uint32_t counts,
ba02fd6b
KW
2190 uint32_t mask,
2191 struct list_head *attr_list)
4e01847c
KW
2192{
2193 int ret = 0;
2194 uint32_t i = 0;
2195
2196 for (i = 0; i < counts; i++) {
ba02fd6b 2197 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
4e01847c
KW
2198 if (ret)
2199 goto failed;
2200 }
2201
2202 return 0;
2203
2204failed:
ba02fd6b 2205 amdgpu_device_attr_remove_groups(adev, attr_list);
4e01847c
KW
2206
2207 return ret;
2208}
2209
2210static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
ba02fd6b 2211 struct list_head *attr_list)
4e01847c 2212{
ba02fd6b 2213 struct amdgpu_device_attr_entry *entry, *entry_tmp;
4e01847c 2214
ba02fd6b
KW
2215 if (list_empty(attr_list))
2216 return ;
2217
2218 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2219 amdgpu_device_attr_remove(adev, entry->attr);
2220 list_del(&entry->entry);
2221 kfree(entry);
2222 }
4e01847c 2223}
e3933f26 2224
d38ceaf9
AD
2225static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2226 struct device_attribute *attr,
2227 char *buf)
2228{
2229 struct amdgpu_device *adev = dev_get_drvdata(dev);
a34d1166 2230 int channel = to_sensor_dev_attr(attr)->index;
70c5350a 2231 int r, temp = 0, size = sizeof(temp);
d38ceaf9 2232
53b3f8f4 2233 if (amdgpu_in_reset(adev))
48b270bb 2234 return -EPERM;
d2ae842d
AD
2235 if (adev->in_suspend && !adev->in_runpm)
2236 return -EPERM;
48b270bb 2237
a34d1166
EQ
2238 if (channel >= PP_TEMP_MAX)
2239 return -EINVAL;
2240
4a580877 2241 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2242 if (r < 0) {
4a580877 2243 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2244 return r;
66429300 2245 }
b9a9294b 2246
a34d1166
EQ
2247 switch (channel) {
2248 case PP_TEMP_JUNCTION:
2249 /* get current junction temperature */
2250 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2251 (void *)&temp, &size);
a34d1166
EQ
2252 break;
2253 case PP_TEMP_EDGE:
2254 /* get current edge temperature */
2255 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2256 (void *)&temp, &size);
a34d1166
EQ
2257 break;
2258 case PP_TEMP_MEM:
2259 /* get current memory temperature */
2260 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2261 (void *)&temp, &size);
b9a9294b
AD
2262 break;
2263 default:
2264 r = -EINVAL;
a34d1166
EQ
2265 break;
2266 }
d38ceaf9 2267
4a580877
LT
2268 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2269 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b
AD
2270
2271 if (r)
2272 return r;
2273
a9ca9bb3 2274 return sysfs_emit(buf, "%d\n", temp);
d38ceaf9
AD
2275}
2276
2277static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2278 struct device_attribute *attr,
2279 char *buf)
2280{
2281 struct amdgpu_device *adev = dev_get_drvdata(dev);
2282 int hyst = to_sensor_dev_attr(attr)->index;
2283 int temp;
2284
2285 if (hyst)
2286 temp = adev->pm.dpm.thermal.min_temp;
2287 else
2288 temp = adev->pm.dpm.thermal.max_temp;
2289
a9ca9bb3 2290 return sysfs_emit(buf, "%d\n", temp);
d38ceaf9
AD
2291}
2292
437ccd17
EQ
2293static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2294 struct device_attribute *attr,
2295 char *buf)
2296{
2297 struct amdgpu_device *adev = dev_get_drvdata(dev);
2298 int hyst = to_sensor_dev_attr(attr)->index;
2299 int temp;
2300
2301 if (hyst)
2302 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2303 else
2304 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2305
a9ca9bb3 2306 return sysfs_emit(buf, "%d\n", temp);
437ccd17
EQ
2307}
2308
2309static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2310 struct device_attribute *attr,
2311 char *buf)
2312{
2313 struct amdgpu_device *adev = dev_get_drvdata(dev);
2314 int hyst = to_sensor_dev_attr(attr)->index;
2315 int temp;
2316
2317 if (hyst)
2318 temp = adev->pm.dpm.thermal.min_mem_temp;
2319 else
2320 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2321
a9ca9bb3 2322 return sysfs_emit(buf, "%d\n", temp);
437ccd17
EQ
2323}
2324
2adc1156
EQ
2325static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2326 struct device_attribute *attr,
2327 char *buf)
2328{
2329 int channel = to_sensor_dev_attr(attr)->index;
2330
2331 if (channel >= PP_TEMP_MAX)
2332 return -EINVAL;
2333
a9ca9bb3 2334 return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2adc1156
EQ
2335}
2336
901cb599
EQ
2337static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2338 struct device_attribute *attr,
2339 char *buf)
2340{
2341 struct amdgpu_device *adev = dev_get_drvdata(dev);
2342 int channel = to_sensor_dev_attr(attr)->index;
2343 int temp = 0;
2344
2345 if (channel >= PP_TEMP_MAX)
2346 return -EINVAL;
2347
2348 switch (channel) {
2349 case PP_TEMP_JUNCTION:
2350 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2351 break;
2352 case PP_TEMP_EDGE:
2353 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2354 break;
2355 case PP_TEMP_MEM:
2356 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2357 break;
2358 }
2359
a9ca9bb3 2360 return sysfs_emit(buf, "%d\n", temp);
901cb599
EQ
2361}
2362
d38ceaf9
AD
2363static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2364 struct device_attribute *attr,
2365 char *buf)
2366{
2367 struct amdgpu_device *adev = dev_get_drvdata(dev);
2368 u32 pwm_mode = 0;
b9a9294b
AD
2369 int ret;
2370
53b3f8f4 2371 if (amdgpu_in_reset(adev))
48b270bb 2372 return -EPERM;
d2ae842d
AD
2373 if (adev->in_suspend && !adev->in_runpm)
2374 return -EPERM;
48b270bb 2375
4a580877 2376 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2377 if (ret < 0) {
4a580877 2378 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2379 return ret;
66429300 2380 }
c9ffa427 2381
f46587bc
DP
2382 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2383 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2384 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2385 return -EINVAL;
a76ff5af 2386 }
d38ceaf9 2387
f46587bc
DP
2388 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2389
4a580877
LT
2390 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2391 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2392
fdf8eea5 2393 return sysfs_emit(buf, "%u\n", pwm_mode);
d38ceaf9
AD
2394}
2395
2396static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2397 struct device_attribute *attr,
2398 const char *buf,
2399 size_t count)
2400{
2401 struct amdgpu_device *adev = dev_get_drvdata(dev);
b9a9294b 2402 int err, ret;
d38ceaf9
AD
2403 int value;
2404
53b3f8f4 2405 if (amdgpu_in_reset(adev))
48b270bb 2406 return -EPERM;
d2ae842d
AD
2407 if (adev->in_suspend && !adev->in_runpm)
2408 return -EPERM;
48b270bb 2409
fcd90fee
EQ
2410 err = kstrtoint(buf, 10, &value);
2411 if (err)
2412 return err;
a76ff5af 2413
4a580877 2414 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2415 if (ret < 0) {
4a580877 2416 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2417 return ret;
66429300 2418 }
b9a9294b 2419
f46587bc
DP
2420 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2421 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2422 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2423 return -EINVAL;
a76ff5af 2424 }
d38ceaf9 2425
f46587bc
DP
2426 amdgpu_dpm_set_fan_control_mode(adev, value);
2427
4a580877
LT
2428 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2429 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2430
d38ceaf9
AD
2431 return count;
2432}
2433
2434static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2435 struct device_attribute *attr,
2436 char *buf)
2437{
fdf8eea5 2438 return sysfs_emit(buf, "%i\n", 0);
d38ceaf9
AD
2439}
2440
2441static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2442 struct device_attribute *attr,
2443 char *buf)
2444{
fdf8eea5 2445 return sysfs_emit(buf, "%i\n", 255);
d38ceaf9
AD
2446}
2447
2448static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2449 struct device_attribute *attr,
2450 const char *buf, size_t count)
2451{
2452 struct amdgpu_device *adev = dev_get_drvdata(dev);
2453 int err;
2454 u32 value;
b8a9c003 2455 u32 pwm_mode;
d38ceaf9 2456
53b3f8f4 2457 if (amdgpu_in_reset(adev))
48b270bb 2458 return -EPERM;
d2ae842d
AD
2459 if (adev->in_suspend && !adev->in_runpm)
2460 return -EPERM;
48b270bb 2461
4a580877 2462 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2463 if (err < 0) {
4a580877 2464 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2465 return err;
66429300 2466 }
b9a9294b 2467
f46587bc 2468 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
b8a9c003
RZ
2469 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2470 pr_info("manual fan speed control should be enabled first\n");
4a580877
LT
2471 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2472 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b8a9c003
RZ
2473 return -EINVAL;
2474 }
2475
d38ceaf9 2476 err = kstrtou32(buf, 10, &value);
b9a9294b 2477 if (err) {
4a580877
LT
2478 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2479 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
d38ceaf9 2480 return err;
b9a9294b 2481 }
d38ceaf9 2482
0d8318e1
EQ
2483 if (adev->powerplay.pp_funcs->set_fan_speed_pwm)
2484 err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
b9a9294b
AD
2485 else
2486 err = -EINVAL;
2487
4a580877
LT
2488 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2489 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b
AD
2490
2491 if (err)
2492 return err;
d38ceaf9
AD
2493
2494 return count;
2495}
2496
2497static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2498 struct device_attribute *attr,
2499 char *buf)
2500{
2501 struct amdgpu_device *adev = dev_get_drvdata(dev);
2502 int err;
cd4d7464 2503 u32 speed = 0;
d38ceaf9 2504
53b3f8f4 2505 if (amdgpu_in_reset(adev))
48b270bb 2506 return -EPERM;
d2ae842d
AD
2507 if (adev->in_suspend && !adev->in_runpm)
2508 return -EPERM;
48b270bb 2509
4a580877 2510 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2511 if (err < 0) {
4a580877 2512 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2513 return err;
66429300 2514 }
5ec36e2d 2515
0d8318e1
EQ
2516 if (adev->powerplay.pp_funcs->get_fan_speed_pwm)
2517 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
b9a9294b
AD
2518 else
2519 err = -EINVAL;
2520
4a580877
LT
2521 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2522 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b
AD
2523
2524 if (err)
2525 return err;
d38ceaf9 2526
fdf8eea5 2527 return sysfs_emit(buf, "%i\n", speed);
d38ceaf9
AD
2528}
2529
81c1514b
GI
2530static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2531 struct device_attribute *attr,
2532 char *buf)
2533{
2534 struct amdgpu_device *adev = dev_get_drvdata(dev);
2535 int err;
cd4d7464 2536 u32 speed = 0;
81c1514b 2537
53b3f8f4 2538 if (amdgpu_in_reset(adev))
48b270bb 2539 return -EPERM;
d2ae842d
AD
2540 if (adev->in_suspend && !adev->in_runpm)
2541 return -EPERM;
48b270bb 2542
4a580877 2543 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2544 if (err < 0) {
4a580877 2545 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2546 return err;
66429300 2547 }
5ec36e2d 2548
f46587bc 2549 if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
cd4d7464 2550 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
b9a9294b
AD
2551 else
2552 err = -EINVAL;
2553
4a580877
LT
2554 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2555 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b
AD
2556
2557 if (err)
2558 return err;
81c1514b 2559
fdf8eea5 2560 return sysfs_emit(buf, "%i\n", speed);
81c1514b
GI
2561}
2562
c2870527
RZ
2563static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2564 struct device_attribute *attr,
2565 char *buf)
2566{
2567 struct amdgpu_device *adev = dev_get_drvdata(dev);
2568 u32 min_rpm = 0;
2569 u32 size = sizeof(min_rpm);
2570 int r;
2571
53b3f8f4 2572 if (amdgpu_in_reset(adev))
48b270bb 2573 return -EPERM;
d2ae842d
AD
2574 if (adev->in_suspend && !adev->in_runpm)
2575 return -EPERM;
48b270bb 2576
4a580877 2577 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2578 if (r < 0) {
4a580877 2579 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2580 return r;
66429300 2581 }
b9a9294b 2582
c2870527
RZ
2583 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2584 (void *)&min_rpm, &size);
b9a9294b 2585
4a580877
LT
2586 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2587 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2588
c2870527
RZ
2589 if (r)
2590 return r;
2591
a9ca9bb3 2592 return sysfs_emit(buf, "%d\n", min_rpm);
c2870527
RZ
2593}
2594
2595static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2596 struct device_attribute *attr,
2597 char *buf)
2598{
2599 struct amdgpu_device *adev = dev_get_drvdata(dev);
2600 u32 max_rpm = 0;
2601 u32 size = sizeof(max_rpm);
2602 int r;
2603
53b3f8f4 2604 if (amdgpu_in_reset(adev))
48b270bb 2605 return -EPERM;
d2ae842d
AD
2606 if (adev->in_suspend && !adev->in_runpm)
2607 return -EPERM;
48b270bb 2608
4a580877 2609 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2610 if (r < 0) {
4a580877 2611 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2612 return r;
66429300 2613 }
b9a9294b 2614
c2870527
RZ
2615 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2616 (void *)&max_rpm, &size);
b9a9294b 2617
4a580877
LT
2618 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2619 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2620
c2870527
RZ
2621 if (r)
2622 return r;
2623
a9ca9bb3 2624 return sysfs_emit(buf, "%d\n", max_rpm);
c2870527
RZ
2625}
2626
2627static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2628 struct device_attribute *attr,
2629 char *buf)
2630{
2631 struct amdgpu_device *adev = dev_get_drvdata(dev);
2632 int err;
2633 u32 rpm = 0;
c2870527 2634
53b3f8f4 2635 if (amdgpu_in_reset(adev))
48b270bb 2636 return -EPERM;
d2ae842d
AD
2637 if (adev->in_suspend && !adev->in_runpm)
2638 return -EPERM;
48b270bb 2639
4a580877 2640 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2641 if (err < 0) {
4a580877 2642 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2643 return err;
66429300 2644 }
c2870527 2645
f46587bc 2646 if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
c2870527 2647 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
b9a9294b
AD
2648 else
2649 err = -EINVAL;
2650
4a580877
LT
2651 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2652 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b
AD
2653
2654 if (err)
2655 return err;
c2870527 2656
fdf8eea5 2657 return sysfs_emit(buf, "%i\n", rpm);
c2870527
RZ
2658}
2659
2660static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2661 struct device_attribute *attr,
2662 const char *buf, size_t count)
2663{
2664 struct amdgpu_device *adev = dev_get_drvdata(dev);
2665 int err;
2666 u32 value;
2667 u32 pwm_mode;
2668
53b3f8f4 2669 if (amdgpu_in_reset(adev))
48b270bb 2670 return -EPERM;
d2ae842d
AD
2671 if (adev->in_suspend && !adev->in_runpm)
2672 return -EPERM;
48b270bb 2673
4a580877 2674 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2675 if (err < 0) {
4a580877 2676 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2677 return err;
66429300 2678 }
b9a9294b 2679
f46587bc 2680 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
96026ce0 2681
b9a9294b 2682 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
4a580877
LT
2683 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2684 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
c2870527 2685 return -ENODATA;
b9a9294b 2686 }
c2870527
RZ
2687
2688 err = kstrtou32(buf, 10, &value);
b9a9294b 2689 if (err) {
4a580877
LT
2690 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2691 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
c2870527 2692 return err;
b9a9294b 2693 }
c2870527 2694
f46587bc 2695 if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
c2870527 2696 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
b9a9294b
AD
2697 else
2698 err = -EINVAL;
2699
4a580877
LT
2700 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2701 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b
AD
2702
2703 if (err)
2704 return err;
c2870527
RZ
2705
2706 return count;
2707}
2708
2709static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2710 struct device_attribute *attr,
2711 char *buf)
2712{
2713 struct amdgpu_device *adev = dev_get_drvdata(dev);
2714 u32 pwm_mode = 0;
b9a9294b
AD
2715 int ret;
2716
53b3f8f4 2717 if (amdgpu_in_reset(adev))
48b270bb 2718 return -EPERM;
d2ae842d
AD
2719 if (adev->in_suspend && !adev->in_runpm)
2720 return -EPERM;
48b270bb 2721
4a580877 2722 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2723 if (ret < 0) {
4a580877 2724 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2725 return ret;
66429300 2726 }
c2870527 2727
f46587bc
DP
2728 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2729 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2730 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2731 return -EINVAL;
da5f18e8 2732 }
b9a9294b 2733
f46587bc
DP
2734 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2735
4a580877
LT
2736 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2737 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2738
fdf8eea5 2739 return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
c2870527
RZ
2740}
2741
2742static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2743 struct device_attribute *attr,
2744 const char *buf,
2745 size_t count)
2746{
2747 struct amdgpu_device *adev = dev_get_drvdata(dev);
2748 int err;
2749 int value;
2750 u32 pwm_mode;
2751
53b3f8f4 2752 if (amdgpu_in_reset(adev))
48b270bb 2753 return -EPERM;
d2ae842d
AD
2754 if (adev->in_suspend && !adev->in_runpm)
2755 return -EPERM;
48b270bb 2756
c2870527
RZ
2757 err = kstrtoint(buf, 10, &value);
2758 if (err)
2759 return err;
2760
2761 if (value == 0)
2762 pwm_mode = AMD_FAN_CTRL_AUTO;
2763 else if (value == 1)
2764 pwm_mode = AMD_FAN_CTRL_MANUAL;
2765 else
2766 return -EINVAL;
2767
4a580877 2768 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2769 if (err < 0) {
4a580877 2770 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2771 return err;
66429300 2772 }
b9a9294b 2773
f46587bc
DP
2774 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2775 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2776 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2777 return -EINVAL;
da5f18e8 2778 }
f46587bc 2779 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
c2870527 2780
4a580877
LT
2781 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2782 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2783
c2870527
RZ
2784 return count;
2785}
2786
2bd376bf
AD
2787static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2788 struct device_attribute *attr,
2789 char *buf)
2790{
2791 struct amdgpu_device *adev = dev_get_drvdata(dev);
2bd376bf
AD
2792 u32 vddgfx;
2793 int r, size = sizeof(vddgfx);
2794
53b3f8f4 2795 if (amdgpu_in_reset(adev))
48b270bb 2796 return -EPERM;
d2ae842d
AD
2797 if (adev->in_suspend && !adev->in_runpm)
2798 return -EPERM;
48b270bb 2799
4a580877 2800 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2801 if (r < 0) {
4a580877 2802 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2803 return r;
66429300 2804 }
2bd376bf 2805
2bd376bf
AD
2806 /* get the voltage */
2807 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2808 (void *)&vddgfx, &size);
b9a9294b 2809
4a580877
LT
2810 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2811 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2812
2bd376bf
AD
2813 if (r)
2814 return r;
2815
a9ca9bb3 2816 return sysfs_emit(buf, "%d\n", vddgfx);
2bd376bf
AD
2817}
2818
2819static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2820 struct device_attribute *attr,
2821 char *buf)
2822{
a9ca9bb3 2823 return sysfs_emit(buf, "vddgfx\n");
2bd376bf
AD
2824}
2825
2826static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2827 struct device_attribute *attr,
2828 char *buf)
2829{
2830 struct amdgpu_device *adev = dev_get_drvdata(dev);
2bd376bf
AD
2831 u32 vddnb;
2832 int r, size = sizeof(vddnb);
2833
53b3f8f4 2834 if (amdgpu_in_reset(adev))
48b270bb 2835 return -EPERM;
d2ae842d
AD
2836 if (adev->in_suspend && !adev->in_runpm)
2837 return -EPERM;
48b270bb 2838
2bd376bf 2839 /* only APUs have vddnb */
ccf9ef0b 2840 if (!(adev->flags & AMD_IS_APU))
2bd376bf
AD
2841 return -EINVAL;
2842
4a580877 2843 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2844 if (r < 0) {
4a580877 2845 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2846 return r;
66429300 2847 }
2bd376bf 2848
2bd376bf
AD
2849 /* get the voltage */
2850 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2851 (void *)&vddnb, &size);
b9a9294b 2852
4a580877
LT
2853 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2854 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2855
2bd376bf
AD
2856 if (r)
2857 return r;
2858
a9ca9bb3 2859 return sysfs_emit(buf, "%d\n", vddnb);
2bd376bf
AD
2860}
2861
2862static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2863 struct device_attribute *attr,
2864 char *buf)
2865{
a9ca9bb3 2866 return sysfs_emit(buf, "vddnb\n");
2bd376bf
AD
2867}
2868
2976fc26
AD
2869static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2870 struct device_attribute *attr,
2871 char *buf)
2872{
2873 struct amdgpu_device *adev = dev_get_drvdata(dev);
5b79d048
RZ
2874 u32 query = 0;
2875 int r, size = sizeof(u32);
2976fc26
AD
2876 unsigned uw;
2877
53b3f8f4 2878 if (amdgpu_in_reset(adev))
48b270bb 2879 return -EPERM;
d2ae842d
AD
2880 if (adev->in_suspend && !adev->in_runpm)
2881 return -EPERM;
48b270bb 2882
4a580877 2883 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2884 if (r < 0) {
4a580877 2885 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2886 return r;
66429300 2887 }
2976fc26 2888
2976fc26
AD
2889 /* get the voltage */
2890 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
2891 (void *)&query, &size);
b9a9294b 2892
4a580877
LT
2893 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2894 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2895
2976fc26
AD
2896 if (r)
2897 return r;
2898
2899 /* convert to microwatts */
5b79d048 2900 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2976fc26 2901
a9ca9bb3 2902 return sysfs_emit(buf, "%u\n", uw);
2976fc26
AD
2903}
2904
8d81bce7
RZ
2905static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2906 struct device_attribute *attr,
2907 char *buf)
2908{
fdf8eea5 2909 return sysfs_emit(buf, "%i\n", 0);
8d81bce7
RZ
2910}
2911
91161b06
DP
2912
2913static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
2914 struct device_attribute *attr,
2915 char *buf,
2916 enum pp_power_limit_level pp_limit_level)
8d81bce7
RZ
2917{
2918 struct amdgpu_device *adev = dev_get_drvdata(dev);
8dfc8c53 2919 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
a40a020d
DP
2920 enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
2921 uint32_t limit;
b9a9294b
AD
2922 ssize_t size;
2923 int r;
2924
53b3f8f4 2925 if (amdgpu_in_reset(adev))
48b270bb 2926 return -EPERM;
d2ae842d
AD
2927 if (adev->in_suspend && !adev->in_runpm)
2928 return -EPERM;
48b270bb 2929
91161b06
DP
2930 if ( !(pp_funcs && pp_funcs->get_power_limit))
2931 return -ENODATA;
2932
4a580877 2933 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2934 if (r < 0) {
4a580877 2935 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2936 return r;
66429300 2937 }
8d81bce7 2938
91161b06
DP
2939 r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
2940 pp_limit_level, power_type);
dc2a8240
DP
2941
2942 if (!r)
09b6744c 2943 size = sysfs_emit(buf, "%u\n", limit * 1000000);
dc2a8240 2944 else
09b6744c 2945 size = sysfs_emit(buf, "\n");
b9a9294b 2946
4a580877
LT
2947 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2948 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b
AD
2949
2950 return size;
8d81bce7
RZ
2951}
2952
91161b06
DP
2953
2954static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
8d81bce7
RZ
2955 struct device_attribute *attr,
2956 char *buf)
2957{
91161b06 2958 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
dc2a8240 2959
91161b06 2960}
b9a9294b 2961
91161b06
DP
2962static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2963 struct device_attribute *attr,
2964 char *buf)
2965{
2966 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
b9a9294b 2967
8d81bce7
RZ
2968}
2969
6e58941c
EH
2970static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
2971 struct device_attribute *attr,
2972 char *buf)
2973{
91161b06 2974 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
6e58941c 2975
6e58941c 2976}
91161b06 2977
ae07970a
XH
2978static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
2979 struct device_attribute *attr,
2980 char *buf)
2981{
2982 int limit_type = to_sensor_dev_attr(attr)->index;
2983
a9ca9bb3 2984 return sysfs_emit(buf, "%s\n",
ae07970a
XH
2985 limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
2986}
8d81bce7
RZ
2987
2988static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2989 struct device_attribute *attr,
2990 const char *buf,
2991 size_t count)
2992{
2993 struct amdgpu_device *adev = dev_get_drvdata(dev);
8dfc8c53 2994 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
ae07970a 2995 int limit_type = to_sensor_dev_attr(attr)->index;
8d81bce7
RZ
2996 int err;
2997 u32 value;
2998
53b3f8f4 2999 if (amdgpu_in_reset(adev))
48b270bb 3000 return -EPERM;
d2ae842d
AD
3001 if (adev->in_suspend && !adev->in_runpm)
3002 return -EPERM;
48b270bb 3003
c9ffa427
YT
3004 if (amdgpu_sriov_vf(adev))
3005 return -EINVAL;
3006
8d81bce7
RZ
3007 err = kstrtou32(buf, 10, &value);
3008 if (err)
3009 return err;
3010
3011 value = value / 1000000; /* convert to Watt */
ae07970a 3012 value |= limit_type << 24;
b9a9294b 3013
4a580877 3014 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 3015 if (err < 0) {
4a580877 3016 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 3017 return err;
66429300 3018 }
b9a9294b 3019
8dfc8c53
DP
3020 if (pp_funcs && pp_funcs->set_power_limit)
3021 err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
b9a9294b 3022 else
fcd90fee 3023 err = -EINVAL;
b9a9294b 3024
4a580877
LT
3025 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3026 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
8d81bce7 3027
fcd90fee
EQ
3028 if (err)
3029 return err;
3030
8d81bce7
RZ
3031 return count;
3032}
3033
d0948af7
AD
3034static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3035 struct device_attribute *attr,
3036 char *buf)
3037{
3038 struct amdgpu_device *adev = dev_get_drvdata(dev);
d0948af7
AD
3039 uint32_t sclk;
3040 int r, size = sizeof(sclk);
3041
53b3f8f4 3042 if (amdgpu_in_reset(adev))
48b270bb 3043 return -EPERM;
d2ae842d
AD
3044 if (adev->in_suspend && !adev->in_runpm)
3045 return -EPERM;
48b270bb 3046
4a580877 3047 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 3048 if (r < 0) {
4a580877 3049 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 3050 return r;
66429300 3051 }
d0948af7 3052
d0948af7
AD
3053 /* get the sclk */
3054 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3055 (void *)&sclk, &size);
b9a9294b 3056
4a580877
LT
3057 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3058 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 3059
d0948af7
AD
3060 if (r)
3061 return r;
3062
a9ca9bb3 3063 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
d0948af7
AD
3064}
3065
3066static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3067 struct device_attribute *attr,
3068 char *buf)
3069{
a9ca9bb3 3070 return sysfs_emit(buf, "sclk\n");
d0948af7
AD
3071}
3072
3073static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3074 struct device_attribute *attr,
3075 char *buf)
3076{
3077 struct amdgpu_device *adev = dev_get_drvdata(dev);
d0948af7
AD
3078 uint32_t mclk;
3079 int r, size = sizeof(mclk);
3080
53b3f8f4 3081 if (amdgpu_in_reset(adev))
48b270bb 3082 return -EPERM;
d2ae842d
AD
3083 if (adev->in_suspend && !adev->in_runpm)
3084 return -EPERM;
48b270bb 3085
4a580877 3086 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 3087 if (r < 0) {
4a580877 3088 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 3089 return r;
66429300 3090 }
d0948af7 3091
d0948af7
AD
3092 /* get the sclk */
3093 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3094 (void *)&mclk, &size);
b9a9294b 3095
4a580877
LT
3096 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3097 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 3098
d0948af7
AD
3099 if (r)
3100 return r;
3101
a9ca9bb3 3102 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
d0948af7
AD
3103}
3104
3105static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3106 struct device_attribute *attr,
3107 char *buf)
3108{
a9ca9bb3 3109 return sysfs_emit(buf, "mclk\n");
d0948af7 3110}
844c5419
AD
3111
3112/**
3113 * DOC: hwmon
3114 *
3115 * The amdgpu driver exposes the following sensor interfaces:
dc85db25 3116 *
844c5419 3117 * - GPU temperature (via the on-die sensor)
dc85db25 3118 *
844c5419 3119 * - GPU voltage
dc85db25 3120 *
844c5419 3121 * - Northbridge voltage (APUs only)
dc85db25 3122 *
844c5419 3123 * - GPU power
dc85db25 3124 *
844c5419
AD
3125 * - GPU fan
3126 *
d0948af7
AD
3127 * - GPU gfx/compute engine clock
3128 *
3129 * - GPU memory clock (dGPU only)
3130 *
844c5419 3131 * hwmon interfaces for GPU temperature:
dc85db25 3132 *
a34d1166
EQ
3133 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
3134 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
dc85db25 3135 *
2adc1156
EQ
3136 * - temp[1-3]_label: temperature channel label
3137 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
3138 *
437ccd17
EQ
3139 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3140 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
dc85db25 3141 *
437ccd17
EQ
3142 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3143 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
844c5419 3144 *
901cb599
EQ
3145 * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius
3146 * - these are supported on SOC15 dGPUs only
3147 *
844c5419 3148 * hwmon interfaces for GPU voltage:
dc85db25 3149 *
844c5419 3150 * - in0_input: the voltage on the GPU in millivolts
dc85db25 3151 *
844c5419
AD
3152 * - in1_input: the voltage on the Northbridge in millivolts
3153 *
3154 * hwmon interfaces for GPU power:
dc85db25 3155 *
844c5419 3156 * - power1_average: average power used by the GPU in microWatts
dc85db25 3157 *
844c5419 3158 * - power1_cap_min: minimum cap supported in microWatts
dc85db25 3159 *
844c5419 3160 * - power1_cap_max: maximum cap supported in microWatts
dc85db25 3161 *
844c5419
AD
3162 * - power1_cap: selected power cap in microWatts
3163 *
3164 * hwmon interfaces for GPU fan:
dc85db25 3165 *
844c5419 3166 * - pwm1: pulse width modulation fan level (0-255)
dc85db25
AD
3167 *
3168 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3169 *
844c5419 3170 * - pwm1_min: pulse width modulation fan control minimum level (0)
dc85db25 3171 *
844c5419 3172 * - pwm1_max: pulse width modulation fan control maximum level (255)
dc85db25 3173 *
e5527d8c 3174 * - fan1_min: a minimum value Unit: revolution/min (RPM)
c2870527 3175 *
e5527d8c 3176 * - fan1_max: a maximum value Unit: revolution/max (RPM)
c2870527 3177 *
844c5419
AD
3178 * - fan1_input: fan speed in RPM
3179 *
879e723d 3180 * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM)
c2870527 3181 *
879e723d 3182 * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable
c2870527 3183 *
96401f7c
EQ
3184 * NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
3185 * That will get the former one overridden.
3186 *
d0948af7
AD
3187 * hwmon interfaces for GPU clocks:
3188 *
3189 * - freq1_input: the gfx/compute clock in hertz
3190 *
3191 * - freq2_input: the memory clock in hertz
3192 *
844c5419
AD
3193 * You can use hwmon tools like sensors to view this information on your system.
3194 *
3195 */
3196
a34d1166 3197static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
d38ceaf9
AD
3198static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3199static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
901cb599 3200static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
a34d1166 3201static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
437ccd17
EQ
3202static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3203static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
901cb599 3204static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
a34d1166 3205static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
437ccd17
EQ
3206static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3207static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
901cb599 3208static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
2adc1156
EQ
3209static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3210static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3211static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
d38ceaf9
AD
3212static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3213static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3214static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3215static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
81c1514b 3216static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
c2870527
RZ
3217static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3218static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3219static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3220static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
2bd376bf
AD
3221static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3222static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3223static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3224static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
2976fc26 3225static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
8d81bce7
RZ
3226static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3227static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3228static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
6e58941c 3229static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
ae07970a
XH
3230static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3231static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3232static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3233static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3234static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
6e58941c 3235static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
ae07970a 3236static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
d0948af7
AD
3237static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3238static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3239static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3240static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
d38ceaf9
AD
3241
3242static struct attribute *hwmon_attributes[] = {
3243 &sensor_dev_attr_temp1_input.dev_attr.attr,
3244 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3245 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
a34d1166 3246 &sensor_dev_attr_temp2_input.dev_attr.attr,
437ccd17
EQ
3247 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3248 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
a34d1166 3249 &sensor_dev_attr_temp3_input.dev_attr.attr,
437ccd17
EQ
3250 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3251 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
901cb599
EQ
3252 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3253 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3254 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
2adc1156
EQ
3255 &sensor_dev_attr_temp1_label.dev_attr.attr,
3256 &sensor_dev_attr_temp2_label.dev_attr.attr,
3257 &sensor_dev_attr_temp3_label.dev_attr.attr,
d38ceaf9
AD
3258 &sensor_dev_attr_pwm1.dev_attr.attr,
3259 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3260 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3261 &sensor_dev_attr_pwm1_max.dev_attr.attr,
81c1514b 3262 &sensor_dev_attr_fan1_input.dev_attr.attr,
c2870527
RZ
3263 &sensor_dev_attr_fan1_min.dev_attr.attr,
3264 &sensor_dev_attr_fan1_max.dev_attr.attr,
3265 &sensor_dev_attr_fan1_target.dev_attr.attr,
3266 &sensor_dev_attr_fan1_enable.dev_attr.attr,
2bd376bf
AD
3267 &sensor_dev_attr_in0_input.dev_attr.attr,
3268 &sensor_dev_attr_in0_label.dev_attr.attr,
3269 &sensor_dev_attr_in1_input.dev_attr.attr,
3270 &sensor_dev_attr_in1_label.dev_attr.attr,
2976fc26 3271 &sensor_dev_attr_power1_average.dev_attr.attr,
8d81bce7
RZ
3272 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3273 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3274 &sensor_dev_attr_power1_cap.dev_attr.attr,
6e58941c 3275 &sensor_dev_attr_power1_cap_default.dev_attr.attr,
ae07970a
XH
3276 &sensor_dev_attr_power1_label.dev_attr.attr,
3277 &sensor_dev_attr_power2_average.dev_attr.attr,
3278 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3279 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3280 &sensor_dev_attr_power2_cap.dev_attr.attr,
6e58941c 3281 &sensor_dev_attr_power2_cap_default.dev_attr.attr,
ae07970a 3282 &sensor_dev_attr_power2_label.dev_attr.attr,
d0948af7
AD
3283 &sensor_dev_attr_freq1_input.dev_attr.attr,
3284 &sensor_dev_attr_freq1_label.dev_attr.attr,
3285 &sensor_dev_attr_freq2_input.dev_attr.attr,
3286 &sensor_dev_attr_freq2_label.dev_attr.attr,
d38ceaf9
AD
3287 NULL
3288};
3289
3290static umode_t hwmon_attributes_visible(struct kobject *kobj,
3291 struct attribute *attr, int index)
3292{
cc29ec87 3293 struct device *dev = kobj_to_dev(kobj);
d38ceaf9
AD
3294 struct amdgpu_device *adev = dev_get_drvdata(dev);
3295 umode_t effective_mode = attr->mode;
3296
c9ffa427
YT
3297 /* under multi-vf mode, the hwmon attributes are all not supported */
3298 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3299 return 0;
3300
3301 /* there is no fan under pp one vf mode */
3302 if (amdgpu_sriov_is_pp_one_vf(adev) &&
3303 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3304 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3305 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3306 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3307 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3308 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3309 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3310 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3311 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3312 return 0;
3313
fc5a136d
RZ
3314 /* Skip fan attributes if fan is not present */
3315 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3316 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3317 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3318 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
c2870527
RZ
3319 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3320 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3321 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3322 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3323 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
fc5a136d 3324 return 0;
135f9711 3325
20a96cd3
AD
3326 /* Skip fan attributes on APU */
3327 if ((adev->flags & AMD_IS_APU) &&
3328 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3329 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3330 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3331 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3332 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3333 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3334 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3335 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3336 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3337 return 0;
3338
35dab589
HR
3339 /* Skip crit temp on APU */
3340 if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
3341 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3342 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3343 return 0;
3344
1b5708ff 3345 /* Skip limit attributes if DPM is not enabled */
d38ceaf9
AD
3346 if (!adev->pm.dpm_enabled &&
3347 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
27100735
AD
3348 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3349 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3350 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3351 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
c2870527
RZ
3352 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3353 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3354 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3355 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3356 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3357 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
d38ceaf9
AD
3358 return 0;
3359
239873fc
KW
3360 if (!is_support_sw_smu(adev)) {
3361 /* mask fan attributes if we have no bindings for this asic to expose */
0d8318e1 3362 if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm &&
239873fc
KW
3363 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3364 (!adev->powerplay.pp_funcs->get_fan_control_mode &&
3365 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3366 effective_mode &= ~S_IRUGO;
3367
0d8318e1 3368 if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
239873fc
KW
3369 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3370 (!adev->powerplay.pp_funcs->set_fan_control_mode &&
3371 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3372 effective_mode &= ~S_IWUSR;
3373 }
d38ceaf9 3374
ae07970a
XH
3375 if (((adev->family == AMDGPU_FAMILY_SI) ||
3376 ((adev->flags & AMD_IS_APU) &&
3377 (adev->asic_type != CHIP_VANGOGH))) && /* not implemented yet */
367deb67 3378 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
8d81bce7 3379 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
6e58941c
EH
3380 attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3381 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
8d81bce7
RZ
3382 return 0;
3383
367deb67
AD
3384 if (((adev->family == AMDGPU_FAMILY_SI) ||
3385 ((adev->flags & AMD_IS_APU) &&
3386 (adev->asic_type < CHIP_RENOIR))) && /* not implemented yet */
3387 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3388 return 0;
3389
239873fc
KW
3390 if (!is_support_sw_smu(adev)) {
3391 /* hide max/min values if we can't both query and manage the fan */
0d8318e1
EQ
3392 if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
3393 !adev->powerplay.pp_funcs->get_fan_speed_pwm) &&
239873fc
KW
3394 (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3395 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3396 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3397 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3398 return 0;
3399
3400 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3401 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3402 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3403 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3404 return 0;
3405 }
c2870527 3406
1cdd229b
JD
3407 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3408 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
3409 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3410 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3411 return 0;
3412
0d35bc78
AD
3413 /* only APUs have vddnb */
3414 if (!(adev->flags & AMD_IS_APU) &&
3415 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3416 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
81c1514b
GI
3417 return 0;
3418
d0948af7
AD
3419 /* no mclk on APUs */
3420 if ((adev->flags & AMD_IS_APU) &&
3421 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3422 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3423 return 0;
3424
437ccd17
EQ
3425 /* only SOC15 dGPUs support hotspot and mem temperatures */
3426 if (((adev->flags & AMD_IS_APU) ||
3427 adev->asic_type < CHIP_VEGA10) &&
3428 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3429 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3430 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
901cb599
EQ
3431 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3432 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3433 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
a34d1166
EQ
3434 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
3435 attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
2adc1156
EQ
3436 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3437 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3438 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
437ccd17
EQ
3439 return 0;
3440
ae07970a
XH
3441 /* only Vangogh has fast PPT limit and power labels */
3442 if (!(adev->asic_type == CHIP_VANGOGH) &&
3443 (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3444 attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3445 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3446 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
6e58941c 3447 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
ae07970a
XH
3448 attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
3449 attr == &sensor_dev_attr_power1_label.dev_attr.attr))
3450 return 0;
3451
d38ceaf9
AD
3452 return effective_mode;
3453}
3454
3455static const struct attribute_group hwmon_attrgroup = {
3456 .attrs = hwmon_attributes,
3457 .is_visible = hwmon_attributes_visible,
3458};
3459
3460static const struct attribute_group *hwmon_groups[] = {
3461 &hwmon_attrgroup,
3462 NULL
3463};
3464
d38ceaf9
AD
3465int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3466{
3467 int ret;
4e01847c 3468 uint32_t mask = 0;
d38ceaf9 3469
c86f5ebf
AD
3470 if (adev->pm.sysfs_initialized)
3471 return 0;
3472
d2f52ac8
RZ
3473 if (adev->pm.dpm_enabled == 0)
3474 return 0;
3475
ba02fd6b
KW
3476 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3477
d38ceaf9
AD
3478 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3479 DRIVER_NAME, adev,
3480 hwmon_groups);
3481 if (IS_ERR(adev->pm.int_hwmon_dev)) {
3482 ret = PTR_ERR(adev->pm.int_hwmon_dev);
3483 dev_err(adev->dev,
3484 "Unable to register hwmon device: %d\n", ret);
3485 return ret;
3486 }
3487
4e01847c
KW
3488 switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3489 case SRIOV_VF_MODE_ONE_VF:
3490 mask = ATTR_FLAG_ONEVF;
3491 break;
3492 case SRIOV_VF_MODE_MULTI_VF:
3493 mask = 0;
3494 break;
3495 case SRIOV_VF_MODE_BARE_METAL:
3496 default:
3497 mask = ATTR_FLAG_MASK_ALL;
3498 break;
8efd7275
ML
3499 }
3500
4e01847c
KW
3501 ret = amdgpu_device_attr_create_groups(adev,
3502 amdgpu_device_attrs,
3503 ARRAY_SIZE(amdgpu_device_attrs),
ba02fd6b
KW
3504 mask,
3505 &adev->pm.pm_attr_list);
4e01847c 3506 if (ret)
fb2dbfd2 3507 return ret;
7ca881a8 3508
c86f5ebf
AD
3509 adev->pm.sysfs_initialized = true;
3510
d38ceaf9
AD
3511 return 0;
3512}
3513
3514void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3515{
d2f52ac8
RZ
3516 if (adev->pm.dpm_enabled == 0)
3517 return;
3518
d38ceaf9
AD
3519 if (adev->pm.int_hwmon_dev)
3520 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4e01847c 3521
ba02fd6b 3522 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
d38ceaf9
AD
3523}
3524
d38ceaf9
AD
3525/*
3526 * Debugfs info
3527 */
3528#if defined(CONFIG_DEBUG_FS)
3529
517cb957
HR
3530static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
3531 struct amdgpu_device *adev) {
3532 uint16_t *p_val;
3533 uint32_t size;
3534 int i;
3535
3536 if (is_support_cclk_dpm(adev)) {
4aef0ebc 3537 p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
517cb957
HR
3538 GFP_KERNEL);
3539
3540 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
3541 (void *)p_val, &size)) {
4aef0ebc 3542 for (i = 0; i < adev->smu.cpu_core_num; i++)
517cb957
HR
3543 seq_printf(m, "\t%u MHz (CPU%d)\n",
3544 *(p_val + i), i);
3545 }
3546
3547 kfree(p_val);
3548 }
3549}
3550
3de4ec57
TSD
3551static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3552{
cd7b0c66 3553 uint32_t value;
800c53d6 3554 uint64_t value64 = 0;
5b79d048 3555 uint32_t query = 0;
9f8df7d7 3556 int size;
3de4ec57 3557
3de4ec57 3558 /* GPU Clocks */
9f8df7d7 3559 size = sizeof(value);
3de4ec57 3560 seq_printf(m, "GFX Clocks and Power:\n");
517cb957
HR
3561
3562 amdgpu_debugfs_prints_cpu_info(m, adev);
3563
9f8df7d7 3564 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3de4ec57 3565 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
9f8df7d7 3566 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3de4ec57 3567 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
5ed8d656
RZ
3568 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3569 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3570 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3571 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
9f8df7d7 3572 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3de4ec57 3573 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
9f8df7d7 3574 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3de4ec57 3575 seq_printf(m, "\t%u mV (VDDNB)\n", value);
5b79d048
RZ
3576 size = sizeof(uint32_t);
3577 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3578 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
9f8df7d7 3579 size = sizeof(value);
3de4ec57
TSD
3580 seq_printf(m, "\n");
3581
3582 /* GPU Temp */
9f8df7d7 3583 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3de4ec57
TSD
3584 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3585
3586 /* GPU Load */
9f8df7d7 3587 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3de4ec57 3588 seq_printf(m, "GPU Load: %u %%\n", value);
9b6eb00d
TSD
3589 /* MEM Load */
3590 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3591 seq_printf(m, "MEM Load: %u %%\n", value);
3592
3de4ec57
TSD
3593 seq_printf(m, "\n");
3594
505f8dbb
AD
3595 /* SMC feature mask */
3596 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3597 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3598
1f96ecef
EQ
3599 if (adev->asic_type > CHIP_VEGA20) {
3600 /* VCN clocks */
3601 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3602 if (!value) {
3603 seq_printf(m, "VCN: Disabled\n");
3604 } else {
3605 seq_printf(m, "VCN: Enabled\n");
3606 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3607 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3608 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3609 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3610 }
3de4ec57 3611 }
1f96ecef
EQ
3612 seq_printf(m, "\n");
3613 } else {
3614 /* UVD clocks */
3615 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3616 if (!value) {
3617 seq_printf(m, "UVD: Disabled\n");
3618 } else {
3619 seq_printf(m, "UVD: Enabled\n");
3620 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3621 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3622 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3623 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3624 }
3625 }
3626 seq_printf(m, "\n");
3de4ec57 3627
1f96ecef
EQ
3628 /* VCE clocks */
3629 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3630 if (!value) {
3631 seq_printf(m, "VCE: Disabled\n");
3632 } else {
3633 seq_printf(m, "VCE: Enabled\n");
3634 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3635 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3636 }
3de4ec57
TSD
3637 }
3638 }
3639
3640 return 0;
3641}
3642
a8503b15
HR
3643static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3644{
3645 int i;
3646
3647 for (i = 0; clocks[i].flag; i++)
3648 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3649 (flags & clocks[i].flag) ? "On" : "Off");
3650}
3651
373720f7 3652static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
d38ceaf9 3653{
373720f7
ND
3654 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
3655 struct drm_device *dev = adev_to_drm(adev);
6cb2d4e4 3656 u32 flags = 0;
b9a9294b
AD
3657 int r;
3658
53b3f8f4 3659 if (amdgpu_in_reset(adev))
48b270bb 3660 return -EPERM;
d2ae842d
AD
3661 if (adev->in_suspend && !adev->in_runpm)
3662 return -EPERM;
48b270bb 3663
b9a9294b 3664 r = pm_runtime_get_sync(dev->dev);
66429300
AD
3665 if (r < 0) {
3666 pm_runtime_put_autosuspend(dev->dev);
b9a9294b 3667 return r;
66429300 3668 }
6cb2d4e4 3669
1b5708ff
RZ
3670 if (!adev->pm.dpm_enabled) {
3671 seq_printf(m, "dpm not enabled\n");
b9a9294b
AD
3672 pm_runtime_mark_last_busy(dev->dev);
3673 pm_runtime_put_autosuspend(dev->dev);
1b5708ff
RZ
3674 return 0;
3675 }
b9a9294b
AD
3676
3677 if (!is_support_sw_smu(adev) &&
3678 adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
d38ceaf9 3679 mutex_lock(&adev->pm.mutex);
cd4d7464
RZ
3680 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
3681 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
d38ceaf9
AD
3682 else
3683 seq_printf(m, "Debugfs support not implemented for this asic\n");
3684 mutex_unlock(&adev->pm.mutex);
b9a9294b 3685 r = 0;
6d07fe7b 3686 } else {
b9a9294b 3687 r = amdgpu_debugfs_pm_info_pp(m, adev);
d38ceaf9 3688 }
81b41ff5
EQ
3689 if (r)
3690 goto out;
3691
81b41ff5 3692 amdgpu_device_ip_get_clockgating_state(adev, &flags);
81b41ff5
EQ
3693
3694 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
3695 amdgpu_parse_cg_state(m, flags);
3696 seq_printf(m, "\n");
d38ceaf9 3697
81b41ff5 3698out:
b9a9294b
AD
3699 pm_runtime_mark_last_busy(dev->dev);
3700 pm_runtime_put_autosuspend(dev->dev);
3701
3702 return r;
d38ceaf9
AD
3703}
3704
/* Generates amdgpu_debugfs_pm_info_fops backed by
 * amdgpu_debugfs_pm_info_show(). */
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
3706
27ebf21f
LL
3707/*
3708 * amdgpu_pm_priv_buffer_read - Read memory region allocated to FW
3709 *
3710 * Reads debug memory region allocated to PMFW
3711 */
3712static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
3713 size_t size, loff_t *pos)
3714{
3715 struct amdgpu_device *adev = file_inode(f)->i_private;
3716 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
3717 void *pp_handle = adev->powerplay.pp_handle;
3718 size_t smu_prv_buf_size;
3719 void *smu_prv_buf;
3720
3721 if (amdgpu_in_reset(adev))
3722 return -EPERM;
3723 if (adev->in_suspend && !adev->in_runpm)
3724 return -EPERM;
3725
3726 if (pp_funcs && pp_funcs->get_smu_prv_buf_details)
3727 pp_funcs->get_smu_prv_buf_details(pp_handle, &smu_prv_buf,
3728 &smu_prv_buf_size);
3729 else
3730 return -ENOSYS;
3731
3732 if (!smu_prv_buf || !smu_prv_buf_size)
3733 return -EINVAL;
3734
3735 return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
3736 smu_prv_buf_size);
3737}
3738
/* File operations for the read-only amdgpu_pm_prv_buffer debugfs node;
 * reads are served entirely by amdgpu_pm_prv_buffer_read(). */
static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = amdgpu_pm_prv_buffer_read,
	.llseek = default_llseek,
};
3745
d38ceaf9
AD
3746#endif
3747
/*
 * amdgpu_debugfs_pm_init - create the power-management debugfs entries.
 *
 * Registers amdgpu_pm_info, the PMFW private buffer node (when the
 * firmware reserved one), and the SMU STB debugfs files. Compiles to a
 * no-op without CONFIG_DEBUG_FS.
 */
void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;

	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
			    &amdgpu_debugfs_pm_info_fops);

	/* Only expose the PMFW debug buffer if one was actually allocated. */
	if (adev->pm.smu_prv_buffer_size > 0)
		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
					 adev,
					 &amdgpu_debugfs_pm_prv_buffer_fops,
					 adev->pm.smu_prv_buffer_size);

	amdgpu_smu_stb_debug_fs_init(adev);
#endif
}