/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "atom.h"
#include "amd_pcie.h"

int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
        int ret = 0;

        if (!if_version && !smu_version)
                return -EINVAL;

        if (if_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
                if (ret)
                        return ret;

                ret = smu_read_smc_arg(smu, if_version);
                if (ret)
                        return ret;
        }

        if (smu_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
                if (ret)
                        return ret;

                ret = smu_read_smc_arg(smu, smu_version);
                if (ret)
                        return ret;
        }

        return ret;
}

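/*
 * Illustrative usage sketch (not part of the original file): either
 * out-pointer may be NULL when only one of the two versions is needed.
 *
 *      uint32_t if_version, smu_version;
 *      int err = smu_get_smc_version(smu, &if_version, &smu_version);
 *      if (!err)
 *              pr_debug("driver if: 0x%08x, smu fw: 0x%08x\n",
 *                       if_version, smu_version);
 */
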
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t min, uint32_t max)
{
        int ret = 0, clk_id = 0;
        uint32_t param;

        if (min <= 0 && max <= 0)
                return -EINVAL;

        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0)
                return clk_id;

        if (max > 0) {
                param = (uint32_t)((clk_id << 16) | (max & 0xffff));
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
                                                  param);
                if (ret)
                        return ret;
        }

        if (min > 0) {
                param = (uint32_t)((clk_id << 16) | (min & 0xffff));
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
                                                  param);
                if (ret)
                        return ret;
        }

        return ret;
}

int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t min, uint32_t max)
{
        int ret = 0, clk_id = 0;
        uint32_t param;

        if (min <= 0 && max <= 0)
                return -EINVAL;

        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0)
                return clk_id;

        if (max > 0) {
                param = (uint32_t)((clk_id << 16) | (max & 0xffff));
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
                                                  param);
                if (ret)
                        return ret;
        }

        if (min > 0) {
                param = (uint32_t)((clk_id << 16) | (min & 0xffff));
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
                                                  param);
                if (ret)
                        return ret;
        }

        return ret;
}

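/*
 * Note on the message encoding shared by both range setters above: the clock
 * index goes in the upper 16 bits and the frequency in the lower 16 bits
 * (MHz units assumed, per the smu v11 message convention). A hedged sketch,
 * assuming smu_clk_get_index() maps SMU_GFXCLK to index 1:
 *
 *      param = (1 << 16) | (1800 & 0xffff);    // GFXCLK soft max, 1800 MHz
 */
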
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                           uint32_t *min, uint32_t *max)
{
        int ret = 0, clk_id = 0;
        uint32_t param = 0;

        if (!min && !max)
                return -EINVAL;

        switch (clk_type) {
        case SMU_UCLK:
                if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                        pr_warn("uclk dpm is not enabled\n");
                        return 0;
                }
                break;
        case SMU_GFXCLK:
                if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
                        pr_warn("gfxclk dpm is not enabled\n");
                        return 0;
                }
                break;
        default:
                break;
        }

        mutex_lock(&smu->mutex);
        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0) {
                ret = -EINVAL;
                goto failed;
        }

        param = (clk_id & 0xffff) << 16;

        if (max) {
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
                if (ret)
                        goto failed;
                ret = smu_read_smc_arg(smu, max);
                if (ret)
                        goto failed;
        }

        if (min) {
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
                if (ret)
                        goto failed;
                ret = smu_read_smc_arg(smu, min);
                if (ret)
                        goto failed;
        }

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

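/*
 * Illustrative query (error handling trimmed): fetch the supported UCLK
 * range; either out-pointer may be NULL if only one bound is wanted.
 *
 *      uint32_t min_freq = 0, max_freq = 0;
 *      ret = smu_get_dpm_freq_range(smu, SMU_UCLK, &min_freq, &max_freq);
 */
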
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
                              uint16_t level, uint32_t *value)
{
        int ret = 0, clk_id = 0;
        uint32_t param;

        if (!value)
                return -EINVAL;

        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0)
                return clk_id;

        param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
                                          param);
        if (ret)
                return ret;

        ret = smu_read_smc_arg(smu, &param);
        if (ret)
                return ret;

        /*
         * BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
         * We do not support discrete DPM for now, so mask the bit off.
         */
        *value = param & 0x7fffffff;

        return ret;
}

int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t *value)
{
        return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}

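/*
 * Sketch (illustrative only, error handling trimmed) of walking a DPM table
 * with the two helpers above: level 0xff returns the level count, and each
 * index below that count returns its frequency.
 *
 *      uint32_t count, freq, i;
 *      smu_get_dpm_level_count(smu, SMU_SCLK, &count);
 *      for (i = 0; i < count; i++) {
 *              smu_get_dpm_freq_by_index(smu, SMU_SCLK, (uint16_t)i, &freq);
 *              pr_debug("dpm level %u: %u MHz\n", i, freq);
 *      }
 */
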
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                           bool gate)
{
        int ret = 0;

        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
                ret = smu_dpm_set_uvd_enable(smu, gate);
                break;
        case AMD_IP_BLOCK_TYPE_VCE:
                ret = smu_dpm_set_vce_enable(smu, gate);
                break;
        default:
                break;
        }

        return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
        /* power states are not supported */
        return POWER_STATE_TYPE_DEFAULT;
}

int smu_get_power_num_states(struct smu_context *smu,
                             struct pp_states_info *state_info)
{
        if (!state_info)
                return -EINVAL;

        /* power states are not supported */
        memset(state_info, 0, sizeof(struct pp_states_info));
        state_info->nums = 0;

        return 0;
}

int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
{
        int ret = 0;

        switch (sensor) {
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
                *((uint32_t *)data) = smu->pstate_sclk;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
                *((uint32_t *)data) = smu->pstate_mclk;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
                ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
                *size = 8;
                break;
        case AMDGPU_PP_SENSOR_UVD_POWER:
                *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VCE_POWER:
                *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
                *size = 4;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                *size = 0;

        return ret;
}

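/*
 * Illustrative read (not part of the original file): the caller provides the
 * buffer and its size; on return *size holds the bytes written, or 0 on error.
 *
 *      uint32_t sclk, size = sizeof(sclk);
 *      if (!smu_common_read_sensor(smu, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
 *                                  &sclk, &size))
 *              pr_debug("stable pstate sclk: %u\n", sclk);
 */
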
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
                     void *table_data, bool drv2smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *table = NULL;
        int ret = 0;
        int table_id = smu_table_get_index(smu, table_index);

        if (!table_data || table_id >= smu_table->table_count || table_id < 0)
                return -EINVAL;

        table = &smu_table->tables[table_index];

        if (drv2smu)
                memcpy(table->cpu_addr, table_data, table->size);

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
                                          upper_32_bits(table->mc_address));
        if (ret)
                return ret;
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
                                          lower_32_bits(table->mc_address));
        if (ret)
                return ret;
        ret = smu_send_smc_msg_with_param(smu, drv2smu ?
                                          SMU_MSG_TransferTableDram2Smu :
                                          SMU_MSG_TransferTableSmu2Dram,
                                          table_id);
        if (ret)
                return ret;

        if (!drv2smu)
                memcpy(table_data, table->cpu_addr, table->size);

        return ret;
}

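/*
 * The helper above moves a table in either direction; drv2smu selects the
 * copy direction. A hedged sketch of pulling metrics out of the SMU, assuming
 * the ASIC defines SMU_TABLE_SMU_METRICS and a matching SmuMetrics_t layout:
 *
 *      SmuMetrics_t metrics;
 *      ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, &metrics, false);
 */
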
bool is_support_sw_smu(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_VEGA20)
                return (amdgpu_dpm == 2) ? true : false;
        else if (adev->asic_type >= CHIP_NAVI10)
                return true;
        else
                return false;
}

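/*
 * Callers elsewhere in amdgpu use this predicate to choose between the legacy
 * powerplay path and this SW SMU path, e.g. (illustrative):
 *
 *      if (is_support_sw_smu(adev))
 *              ret = smu_handle_task(&adev->smu, level, task_id);
 */
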
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
        struct smu_table_context *smu_table = &smu->smu_table;

        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                return -EINVAL;

        if (smu_table->hardcode_pptable)
                *table = smu_table->hardcode_pptable;
        else
                *table = smu_table->power_play_table;

        return smu_table->power_play_table_size;
}

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
        int ret = 0;

        if (!smu->pm_enabled)
                return -EINVAL;
        if (header->usStructureSize != size) {
                pr_err("pp table size not matched!\n");
                return -EIO;
        }

        mutex_lock(&smu->mutex);
        if (!smu_table->hardcode_pptable)
                smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
        if (!smu_table->hardcode_pptable) {
                ret = -ENOMEM;
                goto failed;
        }

        memcpy(smu_table->hardcode_pptable, buf, size);
        smu_table->power_play_table = smu_table->hardcode_pptable;
        smu_table->power_play_table_size = size;
        mutex_unlock(&smu->mutex);

        ret = smu_reset(smu);
        if (ret)
                pr_info("smu reset failed, ret = %d\n", ret);

        return ret;

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

int smu_feature_init_dpm(struct smu_context *smu)
{
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
        uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

        if (!smu->pm_enabled)
                return ret;
        mutex_lock(&feature->mutex);
        bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);

        ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
                                           SMU_FEATURE_MAX/32);
        if (ret)
                return ret;

        mutex_lock(&feature->mutex);
        bitmap_or(feature->allowed, feature->allowed,
                  (unsigned long *)allowed_feature_mask,
                  feature->feature_num);
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id >= feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->enabled);
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
                            bool enable)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id >= feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = smu_feature_update_enable_state(smu, feature_id, enable);
        if (ret)
                goto failed;

        if (enable)
                test_and_set_bit(feature_id, feature->enabled);
        else
                test_and_clear_bit(feature_id, feature->enabled);

failed:
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id >= feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->supported);
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_feature_set_supported(struct smu_context *smu,
                              enum smu_feature_mask mask,
                              bool enable)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id >= feature->feature_num);

        mutex_lock(&feature->mutex);
        if (enable)
                test_and_set_bit(feature_id, feature->supported);
        else
                test_and_clear_bit(feature_id, feature->supported);
        mutex_unlock(&feature->mutex);

        return ret;
}

507 | static int smu_set_funcs(struct amdgpu_device *adev) |
508 | { | |
07845526 HR |
509 | struct smu_context *smu = &adev->smu; |
510 | ||
511 | switch (adev->asic_type) { | |
512 | case CHIP_VEGA20: | |
2573e870 | 513 | case CHIP_NAVI10: |
3b94fb10 LG |
514 | if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) |
515 | smu->od_enabled = true; | |
07845526 HR |
516 | smu_v11_0_set_smu_funcs(smu); |
517 | break; | |
518 | default: | |
519 | return -EINVAL; | |
520 | } | |
521 | ||
137d63ab HR |
522 | return 0; |
523 | } | |
524 | ||
static int smu_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        smu->adev = adev;
        smu->pm_enabled = !!amdgpu_dpm;
        mutex_init(&smu->mutex);

        return smu_set_funcs(adev);
}

static int smu_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (!smu->pm_enabled)
                return 0;
        mutex_lock(&smu->mutex);
        smu_handle_task(&adev->smu,
                        smu->smu_dpm.dpm_level,
                        AMD_PP_TASK_COMPLETE_INIT);
        mutex_unlock(&smu->mutex);

        return 0;
}

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
                            uint16_t *size, uint8_t *frev, uint8_t *crev,
                            uint8_t **addr)
{
        struct amdgpu_device *adev = smu->adev;
        uint16_t data_start;

        if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
                                           size, frev, crev, &data_start))
                return -EINVAL;

        *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

        return 0;
}

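/*
 * Hedged example: fetching the firmwareinfo data table to read bootup values
 * (the index macro below comes from atomfirmware.h; shown for illustration):
 *
 *      uint8_t frev, crev, *table;
 *      uint16_t size;
 *      int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
 *                                              firmwareinfo);
 *      ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev, &table);
 */
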
static int smu_initialize_pptable(struct smu_context *smu)
{
        /* TODO */
        return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
        int ret;

        ret = smu_initialize_pptable(smu);
        if (ret) {
                pr_err("Failed to init smu_initialize_pptable!\n");
                return ret;
        }

        /**
         * Create smu_table structure, and init smc tables such as
         * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
         */
        ret = smu_init_smc_tables(smu);
        if (ret) {
                pr_err("Failed to init smc tables!\n");
                return ret;
        }

        /**
         * Create smu_power_context structure, and allocate an smu_dpm_context
         * of the proper size to fill in the smu_power_context data.
         */
        ret = smu_init_power(smu);
        if (ret) {
                pr_err("Failed to init smu_init_power!\n");
                return ret;
        }

        return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
        int ret;

        ret = smu_fini_smc_tables(smu);
        if (ret) {
                pr_err("Failed to fini smc tables!\n");
                return ret;
        }

        return 0;
}

static int smu_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        smu->pool_size = adev->pm.smu_prv_buffer_size;
        smu->smu_feature.feature_num = SMU_FEATURE_MAX;
        mutex_init(&smu->smu_feature.mutex);
        bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

        smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
        smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
        smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
        smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
        smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

        smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
        smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
        smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
        smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
        smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
        smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
        smu->display_config = &adev->pm.pm_display_cfg;

        smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        ret = smu_init_microcode(smu);
        if (ret) {
                pr_err("Failed to load smu firmware!\n");
                return ret;
        }

        ret = smu_smc_table_sw_init(smu);
        if (ret) {
                pr_err("Failed to sw init smc table!\n");
                return ret;
        }

        return 0;
}

static int smu_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        ret = smu_smc_table_sw_fini(smu);
        if (ret) {
                pr_err("Failed to sw fini smc table!\n");
                return ret;
        }

        ret = smu_fini_power(smu);
        if (ret) {
                pr_err("Failed to fini smu power!\n");
                return ret;
        }

        return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        uint32_t table_count = smu_table->table_count;
        uint32_t i = 0;
        int32_t ret = 0;

        if (table_count <= 0)
                return -EINVAL;

        for (i = 0; i < table_count; i++) {
                if (tables[i].size == 0)
                        continue;
                ret = amdgpu_bo_create_kernel(adev,
                                              tables[i].size,
                                              tables[i].align,
                                              tables[i].domain,
                                              &tables[i].bo,
                                              &tables[i].mc_address,
                                              &tables[i].cpu_addr);
                if (ret)
                        goto failed;
        }

        return 0;
failed:
        while (i-- > 0) {
                if (tables[i].size == 0)
                        continue;
                amdgpu_bo_free_kernel(&tables[i].bo,
                                      &tables[i].mc_address,
                                      &tables[i].cpu_addr);
        }
        return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        uint32_t table_count = smu_table->table_count;
        uint32_t i = 0;

        if (table_count == 0 || tables == NULL)
                return 0;

        for (i = 0; i < table_count; i++) {
                if (tables[i].size == 0)
                        continue;
                amdgpu_bo_free_kernel(&tables[i].bo,
                                      &tables[i].mc_address,
                                      &tables[i].cpu_addr);
        }

        return 0;
}

static int smu_override_pcie_parameters(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
        int ret;

        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
                pcie_gen = 2;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
                pcie_gen = 1;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
                pcie_gen = 0;

        /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
         * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
         * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
         */
        if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
                pcie_width = 6;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
                pcie_width = 5;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
                pcie_width = 4;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
                pcie_width = 3;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
                pcie_width = 2;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
                pcie_width = 1;

        smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
        ret = smu_send_smc_msg_with_param(smu,
                                          SMU_MSG_OverridePcieParameters,
                                          smu_pcie_arg);
        if (ret)
                pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
        return ret;
}

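/*
 * Worked example of the encoding above: a board capable of Gen4 and x16
 * yields pcie_gen = 3 and pcie_width = 6, so
 *
 *      smu_pcie_arg = (1 << 16) | (3 << 8) | 6 = 0x10306
 *
 * i.e. LCLK DPM1, PCIE Gen4, x16 lane width.
 */
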
static int smu_smc_table_hw_init(struct smu_context *smu,
                                 bool initialize)
{
        struct amdgpu_device *adev = smu->adev;
        int ret;

        if (smu_is_dpm_running(smu) && adev->in_suspend) {
                pr_info("dpm has been enabled\n");
                return 0;
        }

        ret = smu_init_display_count(smu, 0);
        if (ret)
                return ret;

        if (initialize) {
                /* get boot_values from vbios to set revision, gfxclk, etc. */
                ret = smu_get_vbios_bootup_values(smu);
                if (ret)
                        return ret;

                ret = smu_setup_pptable(smu);
                if (ret)
                        return ret;

                /*
                 * Check whether the format_revision in vbios matches the
                 * pptable header version, and that the structure size is not 0.
                 */
                ret = smu_check_pptable(smu);
                if (ret)
                        return ret;

                /*
                 * allocate vram bos to store smc table contents.
                 */
                ret = smu_init_fb_allocations(smu);
                if (ret)
                        return ret;

                /*
                 * Parse pptable format and fill PPTable_t smc_pptable to
                 * smu_table_context structure. And read the smc_dpm_table from vbios,
                 * then fill it into smc_pptable.
                 */
                ret = smu_parse_pptable(smu);
                if (ret)
                        return ret;

                /*
                 * Send msg GetDriverIfVersion to check if the return value is equal
                 * to the DRIVER_IF_VERSION in the smc header.
                 */
                ret = smu_check_fw_version(smu);
                if (ret)
                        return ret;
        }

        /*
         * Copy pptable bo in the vram to smc with SMU MSGs such as
         * SetDriverDramAddr and TransferTableDram2Smu.
         */
        ret = smu_write_pptable(smu);
        if (ret)
                return ret;

        /* issue RunAfllBtc msg */
        ret = smu_run_afll_btc(smu);
        if (ret)
                return ret;

        ret = smu_feature_set_allowed_mask(smu);
        if (ret)
                return ret;

        ret = smu_system_features_control(smu, true);
        if (ret)
                return ret;

        ret = smu_override_pcie_parameters(smu);
        if (ret)
                return ret;

        ret = smu_notify_display_change(smu);
        if (ret)
                return ret;

        /*
         * Set min deep sleep dce fclk with bootup value from vbios via
         * SetMinDeepSleepDcefclk MSG.
         */
        ret = smu_set_min_dcef_deep_sleep(smu);
        if (ret)
                return ret;

        /*
         * Set initialized values (get from vbios) to dpm tables context such as
         * gfxclk, memclk, dcefclk, etc. And enable the DPM feature for each
         * type of clks.
         */
        if (initialize) {
                ret = smu_populate_smc_pptable(smu);
                if (ret)
                        return ret;

                ret = smu_init_max_sustainable_clocks(smu);
                if (ret)
                        return ret;
        }

        ret = smu_set_default_od_settings(smu, initialize);
        if (ret)
                return ret;

        if (initialize) {
                ret = smu_populate_umd_state_clk(smu);
                if (ret)
                        return ret;

                ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
                if (ret)
                        return ret;
        }

        /*
         * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
         */
        ret = smu_set_tool_table_location(smu);

        if (!smu_is_dpm_running(smu))
                pr_info("dpm has been disabled\n");

        return ret;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used for SMC use; the msgs SetSystemVirtualDramAddr
 * and DramLogSetDramAddr notify the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        uint64_t pool_size = smu->pool_size;
        int ret = 0;

        if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
                return ret;

        memory_pool->size = pool_size;
        memory_pool->align = PAGE_SIZE;
        memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

        switch (pool_size) {
        case SMU_MEMORY_POOL_SIZE_256_MB:
        case SMU_MEMORY_POOL_SIZE_512_MB:
        case SMU_MEMORY_POOL_SIZE_1_GB:
        case SMU_MEMORY_POOL_SIZE_2_GB:
                ret = amdgpu_bo_create_kernel(adev,
                                              memory_pool->size,
                                              memory_pool->align,
                                              memory_pool->domain,
                                              &memory_pool->bo,
                                              &memory_pool->mc_address,
                                              &memory_pool->cpu_addr);
                break;
        default:
                break;
        }

        return ret;
}

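/*
 * The pool size above comes from adev->pm.smu_prv_buffer_size, which is in
 * turn derived from the amdgpu.smu_memory_pool_size module parameter; only
 * the fixed sizes handled by the switch (256 MB to 2 GB) result in an
 * allocation, anything else leaves the pool empty.
 */
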
static int smu_free_memory_pool(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        int ret = 0;

        if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
                return ret;

        amdgpu_bo_free_kernel(&memory_pool->bo,
                              &memory_pool->mc_address,
                              &memory_pool->cpu_addr);

        memset(memory_pool, 0, sizeof(struct smu_table));

        return ret;
}

static int smu_hw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                ret = smu_check_fw_status(smu);
                if (ret) {
                        pr_err("SMC firmware status is not correct\n");
                        return ret;
                }
        }

        ret = smu_feature_init_dpm(smu);
        if (ret)
                goto failed;

        ret = smu_smc_table_hw_init(smu, true);
        if (ret)
                goto failed;

        ret = smu_alloc_memory_pool(smu);
        if (ret)
                goto failed;

        /*
         * Use msgs SetSystemVirtualDramAddr and DramLogSetDramAddr to notify
         * the SMC of the pool location.
         */
        ret = smu_notify_memory_pool_location(smu);
        if (ret)
                goto failed;

        ret = smu_start_thermal_control(smu);
        if (ret)
                goto failed;

        ret = smu_register_irq_handler(smu);
        if (ret)
                goto failed;

        if (!smu->pm_enabled)
                adev->pm.dpm_enabled = false;
        else
                adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */

        pr_info("SMU is initialized successfully!\n");

        return 0;

failed:
        return ret;
}

static int smu_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        struct smu_table_context *table_context = &smu->smu_table;
        int ret = 0;

        kfree(table_context->driver_pptable);
        table_context->driver_pptable = NULL;

        kfree(table_context->max_sustainable_clocks);
        table_context->max_sustainable_clocks = NULL;

        kfree(table_context->overdrive_table);
        table_context->overdrive_table = NULL;

        kfree(smu->irq_source);
        smu->irq_source = NULL;

        ret = smu_fini_fb_allocations(smu);
        if (ret)
                return ret;

        ret = smu_free_memory_pool(smu);
        if (ret)
                return ret;

        return 0;
}

int smu_reset(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        ret = smu_hw_fini(adev);
        if (ret)
                return ret;

        ret = smu_hw_init(adev);
        if (ret)
                return ret;

        return ret;
}

static int smu_suspend(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        ret = smu_system_features_control(smu, false);
        if (ret)
                return ret;

        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

        if (adev->asic_type >= CHIP_NAVI10 &&
            adev->gfx.rlc.funcs->stop)
                adev->gfx.rlc.funcs->stop(adev);

        return 0;
}

static int smu_resume(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        pr_info("SMU is resuming...\n");

        mutex_lock(&smu->mutex);

        ret = smu_smc_table_hw_init(smu, false);
        if (ret)
                goto failed;

        ret = smu_start_thermal_control(smu);
        if (ret)
                goto failed;

        mutex_unlock(&smu->mutex);

        pr_info("SMU is resumed successfully!\n");

        return 0;
failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

int smu_display_configuration_change(struct smu_context *smu,
                                     const struct amd_pp_display_configuration *display_config)
{
        int index = 0;
        int num_of_active_display = 0;

        if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
                return -EINVAL;

        if (!display_config)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        smu_set_deep_sleep_dcefclk(smu,
                                   display_config->min_dcef_deep_sleep_set_clk / 100);

        for (index = 0; index < display_config->num_path_including_non_display; index++) {
                if (display_config->displays[index].controller_id != 0)
                        num_of_active_display++;
        }

        smu_set_active_display_count(smu, num_of_active_display);

        smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
                           display_config->cpu_cc6_disable,
                           display_config->cpu_pstate_disable,
                           display_config->nb_pstate_switch_disable);

        mutex_unlock(&smu->mutex);

        return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
                              struct smu_clock_info *clk_info,
                              enum smu_perf_level_designation designation)
{
        int ret;
        struct smu_performance_level level = {0};

        if (!clk_info)
                return -EINVAL;

        ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
        if (ret)
                return -EINVAL;

        clk_info->min_mem_clk = level.memory_clock;
        clk_info->min_eng_clk = level.core_clock;
        clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

        ret = smu_get_perf_level(smu, designation, &level);
        if (ret)
                return -EINVAL;

        clk_info->min_mem_clk = level.memory_clock;
        clk_info->min_eng_clk = level.core_clock;
        clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

        return 0;
}

int smu_get_current_clocks(struct smu_context *smu,
                           struct amd_pp_clock_info *clocks)
{
        struct amd_pp_simple_clock_info simple_clocks = {0};
        struct smu_clock_info hw_clocks;
        int ret = 0;

        if (!is_support_sw_smu(smu->adev))
                return -EINVAL;

        mutex_lock(&smu->mutex);

        smu_get_dal_power_level(smu, &simple_clocks);

        if (smu->support_power_containment)
                ret = smu_get_clock_info(smu, &hw_clocks,
                                         PERF_LEVEL_POWER_CONTAINMENT);
        else
                ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

        if (ret) {
                pr_err("Error in smu_get_clock_info\n");
                goto failed;
        }

        clocks->min_engine_clock = hw_clocks.min_eng_clk;
        clocks->max_engine_clock = hw_clocks.max_eng_clk;
        clocks->min_memory_clock = hw_clocks.min_mem_clk;
        clocks->max_memory_clock = hw_clocks.max_mem_clk;
        clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
        clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
        clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
        clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

        if (simple_clocks.level == 0)
                clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
        else
                clocks->max_clocks_state = simple_clocks.level;

        if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
                clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
                clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
        }

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

static int smu_set_clockgating_state(void *handle,
                                     enum amd_clockgating_state state)
{
        return 0;
}

static int smu_set_powergating_state(void *handle,
                                     enum amd_powergating_state state)
{
        return 0;
}

static int smu_enable_umd_pstate(void *handle,
                                 enum amd_dpm_forced_level *level)
{
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        struct smu_context *smu = (struct smu_context*)(handle);
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
                return -EINVAL;

        if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
                /* enter umd pstate, save current level, disable gfx cg */
                if (*level & profile_mode_mask) {
                        smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = true;
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_UNGATE);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg */
                if (!(*level & profile_mode_mask)) {
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = smu_dpm_ctx->saved_dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = false;
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_GATE);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_GATE);
                }
        }

        return 0;
}

int smu_adjust_power_state_dynamic(struct smu_context *smu,
                                   enum amd_dpm_forced_level level,
                                   bool skip_display_settings)
{
        int ret = 0;
        int index = 0;
        uint32_t sclk_mask, mclk_mask, soc_mask;
        long workload;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!smu->pm_enabled)
                return -EINVAL;
        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret) {
                        pr_err("Failed to change display config!");
                        return ret;
                }
        }

        ret = smu_apply_clocks_adjust_rules(smu);
        if (ret) {
                pr_err("Failed to apply clocks adjust rules!");
                return ret;
        }

        if (!skip_display_settings) {
                ret = smu_notify_smc_dispaly_config(smu);
                if (ret) {
                        pr_err("Failed to notify smc display config!");
                        return ret;
                }
        }

        if (smu_dpm_ctx->dpm_level != level) {
                switch (level) {
                case AMD_DPM_FORCED_LEVEL_HIGH:
                        ret = smu_force_dpm_limit_value(smu, true);
                        break;
                case AMD_DPM_FORCED_LEVEL_LOW:
                        ret = smu_force_dpm_limit_value(smu, false);
                        break;

                case AMD_DPM_FORCED_LEVEL_AUTO:
                        ret = smu_unforce_dpm_levels(smu);
                        break;

                case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
                case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
                case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
                case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
                        ret = smu_get_profiling_clk_mask(smu, level,
                                                         &sclk_mask,
                                                         &mclk_mask,
                                                         &soc_mask);
                        if (ret)
                                return ret;
                        smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
                        smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
                        break;

                case AMD_DPM_FORCED_LEVEL_MANUAL:
                case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
                default:
                        break;
                }

                if (!ret)
                        smu_dpm_ctx->dpm_level = level;
        }

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];

                if (smu->power_profile_mode != workload)
                        smu_set_power_profile_mode(smu, &workload, 0);
        }

        return ret;
}

int smu_handle_task(struct smu_context *smu,
                    enum amd_dpm_forced_level level,
                    enum amd_pp_task task_id)
{
        int ret = 0;

        switch (task_id) {
        case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
                ret = smu_pre_display_config_changed(smu);
                if (ret)
                        return ret;
                ret = smu_set_cpu_power_state(smu);
                if (ret)
                        return ret;
                ret = smu_adjust_power_state_dynamic(smu, level, false);
                break;
        case AMD_PP_TASK_COMPLETE_INIT:
        case AMD_PP_TASK_READJUST_POWER_STATE:
                ret = smu_adjust_power_state_dynamic(smu, level, true);
                break;
        default:
                break;
        }

        return ret;
}

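/*
 * Illustrative call chain: smu_late_init() funnels through smu_handle_task()
 * with AMD_PP_TASK_COMPLETE_INIT, which skips the display-settings steps and
 * goes straight to smu_adjust_power_state_dynamic(); display config changes
 * from the display side typically arrive as AMD_PP_TASK_DISPLAY_CONFIG_CHANGE
 * instead.
 */
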
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!smu_dpm_ctx->dpm_context)
                return -EINVAL;

        mutex_lock(&(smu->mutex));
        if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
                smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
        }
        mutex_unlock(&(smu->mutex));

        return smu_dpm_ctx->dpm_level;
}

int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
        int ret = 0;
        int i;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!smu_dpm_ctx->dpm_context)
                return -EINVAL;

        for (i = 0; i < smu->adev->num_ip_blocks; i++) {
                if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
                        break;
        }

        smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
        ret = smu_handle_task(smu, level,
                              AMD_PP_TASK_READJUST_POWER_STATE);
        if (ret)
                return ret;

        mutex_lock(&smu->mutex);
        smu_dpm_ctx->dpm_level = level;
        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
        int ret = 0;

        mutex_lock(&smu->mutex);
        ret = smu_init_display_count(smu, count);
        mutex_unlock(&smu->mutex);

        return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
        .name = "smu",
        .early_init = smu_early_init,
        .late_init = smu_late_init,
        .sw_init = smu_sw_init,
        .sw_fini = smu_sw_fini,
        .hw_init = smu_hw_init,
        .hw_fini = smu_hw_fini,
        .suspend = smu_suspend,
        .resume = smu_resume,
        .is_idle = NULL,
        .check_soft_reset = NULL,
        .wait_for_idle = NULL,
        .soft_reset = NULL,
        .set_clockgating_state = smu_set_clockgating_state,
        .set_powergating_state = smu_set_powergating_state,
        .enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 11,
        .minor = 0,
        .rev = 0,
        .funcs = &smu_ip_funcs,
};