/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "atom.h"
#include "amd_pcie.h"

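/**
 * smu_get_smc_version - query the SMC interface and firmware versions
 * @smu: the SMU context
 * @if_version: where to store the driver interface version, may be NULL
 * @smu_version: where to store the SMU firmware version, may be NULL
 *
 * At least one of the two output pointers must be non-NULL. Each requested
 * version is fetched with a GetDriverIfVersion/GetSmuVersion message
 * followed by a read of the SMC argument register.
 *
 * Return: 0 on success, negative error code on failure.
 */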
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, if_version);
		if (ret)
			return ret;
	}

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, smu_version);
		if (ret)
			return ret;
	}

	return ret;
}

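/**
 * smu_dpm_set_power_gate - forward a power gating request to the SMU
 * @smu: the SMU context
 * @block_type: AMD_IP_BLOCK_TYPE_UVD or AMD_IP_BLOCK_TYPE_VCE
 * @gate: requested gating state, passed through to the per-block handler
 *
 * Block types other than UVD and VCE are silently ignored.
 *
 * Return: 0 on success or for unhandled block types, negative error code
 * on failure.
 */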
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, gate);
		break;
	default:
		break;
	}

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	/* power states are not supported; report the default */
	return POWER_STATE_TYPE_DEFAULT;
}

int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported, so report zero states */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 0;

	return 0;
}

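/**
 * smu_common_read_sensor - handle sensor queries common to all ASICs
 * @smu: the SMU context
 * @sensor: the sensor to read
 * @data: output buffer for the sensor value
 * @size: output for the size of the result in bytes
 *
 * Services the stable-pstate clock and enabled-feature-mask sensors here;
 * any other sensor fails with -EINVAL and *size set to 0.
 */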
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

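/**
 * smu_update_table_with_arg - copy a driver table to or from SMC memory
 * @smu: the SMU context
 * @table_id: index into the smu_table array
 * @exarg: extended argument, packed into the high 16 bits of the transfer
 *         message parameter
 * @table_data: driver buffer to copy from (drv2smu) or into (!drv2smu)
 * @drv2smu: true to push the table to the SMC, false to pull it back
 *
 * The table's DRAM address is announced with the SetDriverDramAddrHigh/Low
 * messages, then a TransferTableDram2Smu or TransferTableSmu2Dram message
 * moves the contents.
 *
 * Return: 0 on success, negative error code on failure.
 */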
int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
			      void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;
	int ret = 0;
	uint32_t table_index;

	if (!table_data || table_id >= smu_table->table_count)
		return -EINVAL;

	table_index = (exarg << 16) | table_id;

	table = &smu_table->tables[table_id];

	if (drv2smu)
		memcpy(table->cpu_addr, table_data, table->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_index);
	if (ret)
		return ret;

	if (!drv2smu)
		memcpy(table_data, table->cpu_addr, table->size);

	return ret;
}

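/**
 * is_support_sw_smu - check whether the sw-smu path drives this ASIC
 * @adev: amdgpu device pointer
 *
 * Return: true when DPM is enabled (amdgpu_dpm == 1) and the ASIC is
 * VEGA20 or newer, RAVEN excluded.
 */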
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (amdgpu_dpm != 1)
		return false;

	if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN)
		return true;

	return false;
}

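/**
 * smu_sys_get_pp_table - expose the active powerplay table
 * @smu: the SMU context
 * @table: set to the hardcoded table if one was installed, otherwise to
 *         the default power_play_table
 *
 * Return: the table size in bytes, or -EINVAL if no table exists.
 */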
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

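/**
 * smu_sys_set_pp_table - install a user-supplied powerplay table
 * @smu: the SMU context
 * @buf: buffer starting with an ATOM_COMMON_TABLE_HEADER
 * @size: size of @buf, which must match the header's usStructureSize
 *
 * The buffer is copied into a kernel allocation, made the active
 * power_play_table, and the SMU is reset so the new table takes effect.
 *
 * Return: 0 on success, negative error code on failure.
 */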
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EINVAL;
	if (header->usStructureSize != size) {
		pr_err("pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;
	mutex_unlock(&smu->mutex);

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	return ret;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

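/**
 * smu_feature_init_dpm - initialize the allowed SMU feature bitmap
 * @smu: the SMU context
 *
 * Starts with every feature allowed, then clears whatever the ASIC's
 * unallowed-feature mask forbids.
 *
 * Return: 0 on success, negative error code on failure.
 */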
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;
	mutex_lock(&feature->mutex);
	bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_andnot(feature->allowed, feature->allowed,
		      (unsigned long *)unallowed_feature_mask,
		      feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}

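/*
 * The accessors below serialize access to the supported/enabled feature
 * bitmaps with feature->mutex; smu_feature_set_enabled() also forwards the
 * new state to the firmware via smu_feature_update_enable_state().
 */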
int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = smu_feature_update_enable_state(smu, feature_id, enable);
	if (ret)
		goto failed;

	if (enable)
		test_and_set_bit(feature_id, feature->enabled);
	else
		test_and_clear_bit(feature_id, feature->enabled);

failed:
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_is_supported(struct smu_context *smu, int feature_id)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_supported(struct smu_context *smu, int feature_id,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_NAVI10:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v11_0_set_smu_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;
	mutex_lock(&smu->mutex);
	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);
	mutex_unlock(&smu->mutex);

	return 0;
}

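/**
 * smu_get_atom_data_table - locate a data table in the ATOM BIOS image
 * @smu: the SMU context
 * @table: index of the data table to look up
 * @size: where to return the table size
 * @frev: where to return the format revision
 * @crev: where to return the content revision
 * @addr: set to the start of the table within the BIOS image
 *
 * Return: 0 on success, -EINVAL if the data header cannot be parsed.
 */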
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}

static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to initialize the pptable!\n");
		return ret;
	}

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate a
	 * smu_dpm_context of the proper size to fill the smu_power_context
	 * data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu power!\n");
		return ret;
	}

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to fini smu power!\n");
		return ret;
	}

	return 0;
}

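/**
 * smu_init_fb_allocations - create the buffer objects backing the SMC tables
 * @smu: the SMU context
 *
 * Allocates one kernel BO per non-empty entry of the smu_table array and
 * records its GPU (mc) and CPU addresses. On failure, the BOs allocated so
 * far are freed again.
 *
 * Return: 0 on success, negative error code on failure.
 */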
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;
	int32_t ret = 0;

	if (table_count <= 0)
		return -EINVAL;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		ret = amdgpu_bo_create_kernel(adev,
					      tables[i].size,
					      tables[i].align,
					      tables[i].domain,
					      &tables[i].bo,
					      &tables[i].mc_address,
					      &tables[i].cpu_addr);
		if (ret)
			goto failed;
	}

	return 0;
failed:
	/* free only the BOs allocated before the failure */
	while (i-- > 0) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}
	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;

	if (table_count == 0 || tables == NULL)
		return 0;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}

	return 0;
}

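/**
 * smu_override_pcie_parameters - report platform PCIe capabilities to the SMU
 * @smu: the SMU context
 *
 * Derives the highest supported PCIe generation and lane width from
 * pcie_gen_mask/pcie_mlw_mask and sends them with the
 * OverridePcieParameters message, encoded as documented in the function
 * body.
 *
 * Return: 0 on success, negative error code on failure.
 */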
static int smu_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_OverridePcieParameters,
					  smu_pcie_arg);
	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
	return ret;
}

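/**
 * smu_smc_table_hw_init - run the SMC table bring-up sequence
 * @smu: the SMU context
 * @initialize: true on first init, false when replaying the sequence on
 *              resume
 *
 * Reads boot values and the pptable from the vbios, uploads the tables to
 * the SMC, enables the allowed features, and applies the initial clock and
 * power-limit setup. Steps that only make sense once are skipped when
 * @initialize is false.
 *
 * Return: 0 on success, negative error code on failure.
 */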
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display(smu);
	if (ret)
		return ret;

	if (initialize) {
		/* get boot values from vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_read_pptable_from_vbios(smu);
		if (ret)
			return ret;

		ret = smu_get_clk_info_from_vbios(smu);
		if (ret)
			return ret;

		/*
		 * Check that the format_revision in vbios matches the pptable
		 * header version, and that the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse the pptable format and fill the PPTable_t smc_pptable
		 * in the smu_table_context structure; then read the
		 * smc_dpm_table from vbios and fill it into smc_pptable too.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send the GetDriverIfVersion message and check that the
		 * returned value is equal to the DRIVER_IF_VERSION of the smc
		 * header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/*
	 * Copy the pptable bo in vram to the smc with SMU messages such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue the RunAfllBtc message */
	ret = smu_run_afll_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	ret = smu_override_pcie_parameters(smu);
	if (ret)
		return ret;

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set the min deep sleep dcef clk to the bootup value from vbios via
	 * the SetMinDeepSleepDcefclk message.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	/*
	 * Set the initial values (read from vbios) in the dpm tables context
	 * for gfxclk, memclk, dcefclk, etc., and enable the DPM feature for
	 * each type of clock.
	 */
	if (initialize) {
		ret = smu_populate_smc_pptable(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_od8_default_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
		if (ret)
			return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
	 * message, for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}

/**
 * smu_alloc_memory_pool - allocate a memory pool in system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is reserved for SMC use; its location is announced to
 * the firmware with the SetSystemVirtualDramAddr and DramLogSetDramAddr
 * messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		ret = smu_load_microcode(smu);
		if (ret)
			return ret;
	}

	ret = smu_check_fw_status(smu);
	if (ret) {
		pr_err("SMC firmware status is not correct\n");
		return ret;
	}

	mutex_lock(&smu->mutex);

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMC of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	adev->pm.dpm_enabled = smu->pm_enabled;

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->od_feature_capabilities);
	table_context->od_feature_capabilities = NULL;

	kfree(table_context->od_settings_max);
	table_context->od_settings_max = NULL;

	kfree(table_context->od_settings_min);
	table_context->od_settings_min = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	kfree(table_context->od8_settings);
	table_context->od8_settings = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}

static int smu_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	ret = smu_system_features_control(smu, false);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	pr_info("SMU is resuming...\n");

	mutex_lock(&smu->mutex);

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is resumed successfully!\n");

	return 0;
failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

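/**
 * smu_display_configuration_change - pass a new display config to the SMU
 * @smu: the SMU context
 * @display_config: the display layout reported by the display core
 *
 * Updates the deep-sleep DCEF clock floor, the active display count, and
 * the stored CC6 data to match the new configuration.
 *
 * Return: 0 on success, -EINVAL if sw-smu is unsupported or pm is
 * disabled.
 */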
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_deep_sleep_dcefclk(smu,
				   display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}

int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate: save the current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate: restore the saved level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}

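/**
 * smu_adjust_power_state_dynamic - re-evaluate clocks for a DPM level
 * @smu: the SMU context
 * @level: the requested amd_dpm_forced_level
 * @skip_display_settings: skip the display-config steps when the display
 *                         state has not changed
 *
 * Applies the clock adjustment rules, forces or unforces DPM levels as the
 * requested level dictates, and re-selects the active workload power
 * profile unless the level is manual.
 *
 * Return: 0 on success, negative error code on failure.
 */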
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled)
		return -EINVAL;
	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_dispaly_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		switch (level) {
		case AMD_DPM_FORCED_LEVEL_HIGH:
			ret = smu_force_dpm_limit_value(smu, true);
			break;
		case AMD_DPM_FORCED_LEVEL_LOW:
			ret = smu_force_dpm_limit_value(smu, false);
			break;

		case AMD_DPM_FORCED_LEVEL_AUTO:
			ret = smu_unforce_dpm_levels(smu);
			break;

		case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
			ret = smu_get_profiling_clk_mask(smu, level,
							 &sclk_mask,
							 &mclk_mask,
							 &soc_mask);
			if (ret)
				return ret;
			smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
			smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
			break;

		case AMD_DPM_FORCED_LEVEL_MANUAL:
		case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		default:
			break;
		}

		if (!ret)
			smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}

int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id)
{
	int ret = 0;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};