drm/amd/powerplay: add helper function of smu_get_dpm_freq_range
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "atom.h"
#include "amd_pcie.h"

int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
        int ret = 0;

        if (!if_version && !smu_version)
                return -EINVAL;

        if (if_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
                if (ret)
                        return ret;

                ret = smu_read_smc_arg(smu, if_version);
                if (ret)
                        return ret;
        }

        if (smu_version) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
                if (ret)
                        return ret;

                ret = smu_read_smc_arg(smu, smu_version);
                if (ret)
                        return ret;
        }

        return ret;
}

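/*
 * Usage sketch (illustrative only, not a caller in this file): query both
 * versions during bring-up. Assumes "smu" is an initialized smu_context;
 * either pointer may be NULL if only one value is wanted.
 *
 *	uint32_t if_version, smu_version;
 *
 *	if (!smu_get_smc_version(smu, &if_version, &smu_version))
 *		pr_info("SMC if/fw version: 0x%08x / 0x%08x\n",
 *			if_version, smu_version);
 */
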
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                           uint32_t *min, uint32_t *max)
{
        int ret = 0, clk_id = 0;
        uint32_t param = 0;

        if (!min && !max)
                return -EINVAL;

        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0)
                return clk_id;

        param = (clk_id & 0xffff) << 16;

        if (max) {
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
                if (ret)
                        return ret;
                ret = smu_read_smc_arg(smu, max);
                if (ret)
                        return ret;
        }

        if (min) {
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
                if (ret)
                        return ret;
                ret = smu_read_smc_arg(smu, min);
                if (ret)
                        return ret;
        }

        return ret;
}

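/*
 * Usage sketch for the new helper above (illustrative only): clamp a
 * requested gfx clock to the supported DPM range. SMU_SCLK is one of the
 * smu_clk_type values understood by smu_clk_get_index(); passing NULL for
 * min or max skips the corresponding query.
 *
 *	uint32_t min_freq, max_freq, req;
 *
 *	if (!smu_get_dpm_freq_range(smu, SMU_SCLK, &min_freq, &max_freq))
 *		req = clamp(req, min_freq, max_freq);
 */
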
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
                              uint16_t level, uint32_t *value)
{
        int ret = 0, clk_id = 0;
        uint32_t param;

        if (!value)
                return -EINVAL;

        clk_id = smu_clk_get_index(smu, clk_type);
        if (clk_id < 0)
                return clk_id;

        param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
                                          param);
        if (ret)
                return ret;

        ret = smu_read_smc_arg(smu, &param);
        if (ret)
                return ret;

        /* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
         * currently not supported, so mask it off
         */
        *value = param & 0x7fffffff;

        return ret;
}

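/*
 * Worked example of the message argument layout above: for clk_id 2 and
 * level 1, param = (2 << 16) | 1 = 0x00020001. The returned frequency
 * carries the DPM-type flag in BIT31, which is masked off before the
 * value reaches the caller.
 */
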
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t *value)
{
        return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}

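/*
 * Iteration sketch (illustrative only): level 0xff asks the SMU for the
 * level count, after which each discrete level can be queried by index.
 *
 *	uint32_t count, freq, i;
 *
 *	if (smu_get_dpm_level_count(smu, SMU_MCLK, &count))
 *		return;
 *	for (i = 0; i < count; i++)
 *		if (!smu_get_dpm_freq_by_index(smu, SMU_MCLK, i, &freq))
 *			pr_debug("mclk level %u: freq %u\n", i, freq);
 */
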
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                           bool gate)
{
        int ret = 0;

        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
                ret = smu_dpm_set_uvd_enable(smu, gate);
                break;
        case AMD_IP_BLOCK_TYPE_VCE:
                ret = smu_dpm_set_vce_enable(smu, gate);
                break;
        default:
                break;
        }

        return ret;
}

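/*
 * Usage sketch (illustrative only): gate UVD when the last video session
 * closes. Block types other than UVD/VCE are silently ignored here.
 *
 *	smu_dpm_set_power_gate(smu, AMD_IP_BLOCK_TYPE_UVD, true);
 */
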
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
        /* power states are not supported yet */
        return POWER_STATE_TYPE_DEFAULT;
}

int smu_get_power_num_states(struct smu_context *smu,
                             struct pp_states_info *state_info)
{
        if (!state_info)
                return -EINVAL;

        /* power states are not supported yet */
        memset(state_info, 0, sizeof(struct pp_states_info));
        state_info->nums = 0;

        return 0;
}

int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
{
        int ret = 0;

        switch (sensor) {
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
                *((uint32_t *)data) = smu->pstate_sclk;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
                *((uint32_t *)data) = smu->pstate_mclk;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
                ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
                *size = 8;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                *size = 0;

        return ret;
}

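/*
 * Usage sketch (illustrative only): the enabled-features mask is reported
 * as two 32-bit words, hence *size == 8 above.
 *
 *	uint64_t mask;
 *	uint32_t size = sizeof(mask);
 *
 *	if (!smu_common_read_sensor(smu, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
 *				    &mask, &size))
 *		pr_debug("enabled SMC features: 0x%016llx\n", mask);
 */
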
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
                     void *table_data, bool drv2smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *table = NULL;
        int ret = 0;
        int table_id = smu_table_get_index(smu, table_index);

        if (!table_data || table_id >= smu_table->table_count)
                return -EINVAL;

        table = &smu_table->tables[table_index];

        if (drv2smu)
                memcpy(table->cpu_addr, table_data, table->size);

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
                                          upper_32_bits(table->mc_address));
        if (ret)
                return ret;
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
                                          lower_32_bits(table->mc_address));
        if (ret)
                return ret;
        ret = smu_send_smc_msg_with_param(smu, drv2smu ?
                                          SMU_MSG_TransferTableDram2Smu :
                                          SMU_MSG_TransferTableSmu2Dram,
                                          table_id);
        if (ret)
                return ret;

        if (!drv2smu)
                memcpy(table_data, table->cpu_addr, table->size);

        return ret;
}

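/*
 * The exchange above is a three-message handshake: publish the table's
 * DRAM address in two halves, then ask the SMU to copy in the requested
 * direction. A sketch of a caller pulling fresh metrics out of the SMU
 * (assuming metrics_table is sized like the SMU_TABLE_SMU_METRICS entry):
 *
 *	ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS,
 *			       metrics_table, false);
 */
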
bool is_support_sw_smu(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_VEGA20)
                return amdgpu_dpm == 2;
        else if (adev->asic_type >= CHIP_NAVI10)
                return true;
        else
                return false;
}

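/*
 * Usage sketch (illustrative only; the exact legacy call varies by
 * caller): amdgpu code uses this predicate to choose between the SW SMU
 * path and the legacy powerplay path, in a pattern like
 *
 *	if (is_support_sw_smu(adev))
 *		smu_handle_task(&adev->smu, level,
 *				AMD_PP_TASK_READJUST_POWER_STATE);
 *	else
 *		amdgpu_dpm_dispatch_task(adev,
 *					 AMD_PP_TASK_READJUST_POWER_STATE, NULL);
 */
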
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
        struct smu_table_context *smu_table = &smu->smu_table;

        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                return -EINVAL;

        if (smu_table->hardcode_pptable)
                *table = smu_table->hardcode_pptable;
        else
                *table = smu_table->power_play_table;

        return smu_table->power_play_table_size;
}

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
        int ret = 0;

        if (!smu->pm_enabled)
                return -EINVAL;
        if (header->usStructureSize != size) {
                pr_err("pp table size not matched!\n");
                return -EIO;
        }

        mutex_lock(&smu->mutex);
        if (!smu_table->hardcode_pptable)
                smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
        if (!smu_table->hardcode_pptable) {
                ret = -ENOMEM;
                goto failed;
        }

        memcpy(smu_table->hardcode_pptable, buf, size);
        smu_table->power_play_table = smu_table->hardcode_pptable;
        smu_table->power_play_table_size = size;
        mutex_unlock(&smu->mutex);

        ret = smu_reset(smu);
        if (ret)
                pr_info("smu reset failed, ret = %d\n", ret);

        return ret;

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

int smu_feature_init_dpm(struct smu_context *smu)
{
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
        uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

        if (!smu->pm_enabled)
                return ret;
        mutex_lock(&feature->mutex);
        bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);

        ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
                                           SMU_FEATURE_MAX/32);
        if (ret)
                return ret;

        mutex_lock(&feature->mutex);
        bitmap_or(feature->allowed, feature->allowed,
                  (unsigned long *)allowed_feature_mask,
                  feature->feature_num);
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->enabled);
        mutex_unlock(&feature->mutex);

        return ret;
}

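/*
 * Usage sketch (illustrative only): queries take a logical
 * smu_feature_mask value, which smu_feature_get_index() maps to the
 * ASIC-specific feature bit, e.g.
 *
 *	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
 *		pr_debug("uclk dpm is active\n");
 */
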
int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
                            bool enable)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = smu_feature_update_enable_state(smu, feature_id, enable);
        if (ret)
                goto failed;

        if (enable)
                test_and_set_bit(feature_id, feature->enabled);
        else
                test_and_clear_bit(feature_id, feature->enabled);

failed:
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->supported);
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_feature_set_supported(struct smu_context *smu,
                              enum smu_feature_mask mask,
                              bool enable)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_id;
        int ret = 0;

        feature_id = smu_feature_get_index(smu, mask);

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        if (enable)
                test_and_set_bit(feature_id, feature->supported);
        else
                test_and_clear_bit(feature_id, feature->supported);
        mutex_unlock(&feature->mutex);

        return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
        case CHIP_NAVI10:
                if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                        smu->od_enabled = true;
                smu_v11_0_set_smu_funcs(smu);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int smu_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        smu->adev = adev;
        smu->pm_enabled = !!amdgpu_dpm;
        mutex_init(&smu->mutex);

        return smu_set_funcs(adev);
}

static int smu_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (!smu->pm_enabled)
                return 0;
        mutex_lock(&smu->mutex);
        smu_handle_task(&adev->smu,
                        smu->smu_dpm.dpm_level,
                        AMD_PP_TASK_COMPLETE_INIT);
        mutex_unlock(&smu->mutex);

        return 0;
}

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
                            uint16_t *size, uint8_t *frev, uint8_t *crev,
                            uint8_t **addr)
{
        struct amdgpu_device *adev = smu->adev;
        uint16_t data_start;

        if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
                                           size, frev, crev, &data_start))
                return -EINVAL;

        *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

        return 0;
}

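/*
 * Usage sketch (illustrative only): "table" is an index into the ATOM
 * master data table, typically obtained via get_index_into_master_table(),
 * as done for the firmwareinfo table when reading vbios bootup values.
 *
 *	uint16_t size;
 *	uint8_t frev, crev, *data;
 *	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
 *						firmwareinfo);
 *
 *	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev, &data);
 */
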
static int smu_initialize_pptable(struct smu_context *smu)
{
        /* TODO */
        return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
        int ret;

        ret = smu_initialize_pptable(smu);
        if (ret) {
                pr_err("smu_initialize_pptable failed!\n");
                return ret;
        }

        /**
         * Create the smu_table structure, and init smc tables such as
         * TABLE_PPTABLE, TABLE_WATERMARKS, and TABLE_SMU_METRICS.
         */
        ret = smu_init_smc_tables(smu);
        if (ret) {
                pr_err("Failed to init smc tables!\n");
                return ret;
        }

        /**
         * Create the smu_power_context structure, and allocate the
         * smu_dpm_context and context size needed to fill the
         * smu_power_context data.
         */
        ret = smu_init_power(smu);
        if (ret) {
                pr_err("smu_init_power failed!\n");
                return ret;
        }

        return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
        int ret;

        ret = smu_fini_smc_tables(smu);
        if (ret) {
                pr_err("smu_fini_smc_tables failed!\n");
                return ret;
        }

        return 0;
}

static int smu_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        smu->pool_size = adev->pm.smu_prv_buffer_size;
        smu->smu_feature.feature_num = SMU_FEATURE_MAX;
        mutex_init(&smu->smu_feature.mutex);
        bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

        smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
        smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
        smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
        smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
        smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

        smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
        smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
        smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
        smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
        smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
        smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
        smu->display_config = &adev->pm.pm_display_cfg;

        smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        ret = smu_init_microcode(smu);
        if (ret) {
                pr_err("Failed to load smu firmware!\n");
                return ret;
        }

        ret = smu_smc_table_sw_init(smu);
        if (ret) {
                pr_err("Failed to sw init smc table!\n");
                return ret;
        }

        return 0;
}

static int smu_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        ret = smu_smc_table_sw_fini(smu);
        if (ret) {
                pr_err("Failed to sw fini smc table!\n");
                return ret;
        }

        ret = smu_fini_power(smu);
        if (ret) {
                pr_err("smu_fini_power failed!\n");
                return ret;
        }

        return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        uint32_t table_count = smu_table->table_count;
        uint32_t i = 0;
        int32_t ret = 0;

        if (table_count <= 0)
                return -EINVAL;

        for (i = 0 ; i < table_count; i++) {
                if (tables[i].size == 0)
                        continue;
                ret = amdgpu_bo_create_kernel(adev,
                                              tables[i].size,
                                              tables[i].align,
                                              tables[i].domain,
                                              &tables[i].bo,
                                              &tables[i].mc_address,
                                              &tables[i].cpu_addr);
                if (ret)
                        goto failed;
        }

        return 0;
failed:
        /* unwind every table allocated so far, including entry 0 */
        while (i--) {
                if (tables[i].size == 0)
                        continue;
                amdgpu_bo_free_kernel(&tables[i].bo,
                                      &tables[i].mc_address,
                                      &tables[i].cpu_addr);
        }
        return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        uint32_t table_count = smu_table->table_count;
        uint32_t i = 0;

        if (table_count == 0 || tables == NULL)
                return 0;

        for (i = 0 ; i < table_count; i++) {
                if (tables[i].size == 0)
                        continue;
                amdgpu_bo_free_kernel(&tables[i].bo,
                                      &tables[i].mc_address,
                                      &tables[i].cpu_addr);
        }

        return 0;
}

static int smu_override_pcie_parameters(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
        int ret;

        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
                pcie_gen = 2;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
                pcie_gen = 1;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
                pcie_gen = 0;

        /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
         * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
         * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
         */
        if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
                pcie_width = 6;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
                pcie_width = 5;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
                pcie_width = 4;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
                pcie_width = 3;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
                pcie_width = 2;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
                pcie_width = 1;

        smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
        ret = smu_send_smc_msg_with_param(smu,
                                          SMU_MSG_OverridePcieParameters,
                                          smu_pcie_arg);
        if (ret)
                pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
        return ret;
}

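/*
 * Worked example of the encoding above: a Gen4-capable x16 link gives
 * pcie_gen = 3 and pcie_width = 6, so
 * smu_pcie_arg = (1 << 16) | (3 << 8) | 6 = 0x10306,
 * i.e. LCLK DPM1, GEN4, x16.
 */
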
static int smu_smc_table_hw_init(struct smu_context *smu,
                                 bool initialize)
{
        struct amdgpu_device *adev = smu->adev;
        int ret;

        if (smu_is_dpm_running(smu) && adev->in_suspend) {
                pr_info("dpm has been enabled\n");
                return 0;
        }

        ret = smu_init_display(smu);
        if (ret)
                return ret;

        if (initialize) {
                /* get boot_values from vbios to set revision, gfxclk, etc. */
                ret = smu_get_vbios_bootup_values(smu);
                if (ret)
                        return ret;

                ret = smu_setup_pptable(smu);
                if (ret)
                        return ret;

                /*
                 * Check that the format_revision in vbios matches the pptable
                 * header version, and that the structure size is not 0.
                 */
                ret = smu_check_pptable(smu);
                if (ret)
                        return ret;

                /*
                 * Allocate vram bos to store smc table contents.
                 */
                ret = smu_init_fb_allocations(smu);
                if (ret)
                        return ret;

                /*
                 * Parse the pptable format and fill the PPTable_t smc_pptable
                 * in the smu_table_context structure. Then read the
                 * smc_dpm_table from vbios and fill it into smc_pptable as
                 * well.
                 */
                ret = smu_parse_pptable(smu);
                if (ret)
                        return ret;

                /*
                 * Send msg GetDriverIfVersion to check if the return value is
                 * equal to the DRIVER_IF_VERSION in the smc header.
                 */
                ret = smu_check_fw_version(smu);
                if (ret)
                        return ret;
        }

        /*
         * Copy the pptable bo in vram to the smc with SMU msgs such as
         * SetDriverDramAddr and TransferTableDram2Smu.
         */
        ret = smu_write_pptable(smu);
        if (ret)
                return ret;

        /* issue the RunAfllBtc msg */
        ret = smu_run_afll_btc(smu);
        if (ret)
                return ret;

        ret = smu_feature_set_allowed_mask(smu);
        if (ret)
                return ret;

        ret = smu_system_features_control(smu, true);
        if (ret)
                return ret;

        ret = smu_override_pcie_parameters(smu);
        if (ret)
                return ret;

        ret = smu_notify_display_change(smu);
        if (ret)
                return ret;

        /*
         * Set the min deep sleep dcefclk to the bootup value from vbios via
         * the SetMinDeepSleepDcefclk msg.
         */
        ret = smu_set_min_dcef_deep_sleep(smu);
        if (ret)
                return ret;

        /*
         * Write the initial values (obtained from vbios), such as gfxclk,
         * memclk, and dcefclk, into the dpm tables context, and enable the
         * DPM feature for each clock type.
         */
        if (initialize) {
                ret = smu_populate_smc_pptable(smu);
                if (ret)
                        return ret;

                ret = smu_init_max_sustainable_clocks(smu);
                if (ret)
                        return ret;
        }

        ret = smu_set_od8_default_settings(smu, initialize);
        if (ret)
                return ret;

        if (initialize) {
                ret = smu_populate_umd_state_clk(smu);
                if (ret)
                        return ret;

                ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
                if (ret)
                        return ret;
        }

        /*
         * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr msg
         * for tools.
         */
        ret = smu_set_tool_table_location(smu);

        if (!smu_is_dpm_running(smu))
                pr_info("dpm has been disabled\n");

        return ret;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: pointer to the smu context
 *
 * This memory pool will be used for SMC use; the msgs
 * SetSystemVirtualDramAddr and DramLogSetDramAddr notify the SMC of its
 * location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        uint64_t pool_size = smu->pool_size;
        int ret = 0;

        if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
                return ret;

        memory_pool->size = pool_size;
        memory_pool->align = PAGE_SIZE;
        memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

        switch (pool_size) {
        case SMU_MEMORY_POOL_SIZE_256_MB:
        case SMU_MEMORY_POOL_SIZE_512_MB:
        case SMU_MEMORY_POOL_SIZE_1_GB:
        case SMU_MEMORY_POOL_SIZE_2_GB:
                ret = amdgpu_bo_create_kernel(adev,
                                              memory_pool->size,
                                              memory_pool->align,
                                              memory_pool->domain,
                                              &memory_pool->bo,
                                              &memory_pool->mc_address,
                                              &memory_pool->cpu_addr);
                break;
        default:
                break;
        }

        return ret;
}

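/*
 * Note (assumption based on how smu_prv_buffer_size is populated
 * elsewhere in amdgpu): smu->pool_size mirrors
 * adev->pm.smu_prv_buffer_size, which is derived from the
 * amdgpu_smu_memory_pool_size module parameter; only the four sizes
 * enumerated above result in an actual allocation.
 */
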
static int smu_free_memory_pool(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        int ret = 0;

        if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
                return ret;

        amdgpu_bo_free_kernel(&memory_pool->bo,
                              &memory_pool->mc_address,
                              &memory_pool->cpu_addr);

        memset(memory_pool, 0, sizeof(struct smu_table));

        return ret;
}

static int smu_hw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                ret = smu_check_fw_status(smu);
                if (ret) {
                        pr_err("SMC firmware status is not correct\n");
                        return ret;
                }
        }

        mutex_lock(&smu->mutex);

        ret = smu_feature_init_dpm(smu);
        if (ret)
                goto failed;

        ret = smu_smc_table_hw_init(smu, true);
        if (ret)
                goto failed;

        ret = smu_alloc_memory_pool(smu);
        if (ret)
                goto failed;

        /*
         * Use the msgs SetSystemVirtualDramAddr and DramLogSetDramAddr to
         * notify the SMC of the pool location.
         */
        ret = smu_notify_memory_pool_location(smu);
        if (ret)
                goto failed;

        ret = smu_start_thermal_control(smu);
        if (ret)
                goto failed;

        mutex_unlock(&smu->mutex);

        if (!smu->pm_enabled)
                adev->pm.dpm_enabled = false;
        else
                adev->pm.dpm_enabled = true; /* TODO: only set the dpm_enabled flag once VCN and DAL DPM are workable */

        pr_info("SMU is initialized successfully!\n");

        return 0;

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

static int smu_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        struct smu_table_context *table_context = &smu->smu_table;
        int ret = 0;

        kfree(table_context->driver_pptable);
        table_context->driver_pptable = NULL;

        kfree(table_context->max_sustainable_clocks);
        table_context->max_sustainable_clocks = NULL;

        kfree(table_context->od_feature_capabilities);
        table_context->od_feature_capabilities = NULL;

        kfree(table_context->od_settings_max);
        table_context->od_settings_max = NULL;

        kfree(table_context->od_settings_min);
        table_context->od_settings_min = NULL;

        kfree(table_context->overdrive_table);
        table_context->overdrive_table = NULL;

        kfree(table_context->od8_settings);
        table_context->od8_settings = NULL;

        ret = smu_fini_fb_allocations(smu);
        if (ret)
                return ret;

        ret = smu_free_memory_pool(smu);
        if (ret)
                return ret;

        return 0;
}

int smu_reset(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        ret = smu_hw_fini(adev);
        if (ret)
                return ret;

        ret = smu_hw_init(adev);
        if (ret)
                return ret;

        return ret;
}

static int smu_suspend(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        ret = smu_system_features_control(smu, false);
        if (ret)
                return ret;

        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

        if (adev->asic_type >= CHIP_NAVI10 &&
            adev->gfx.rlc.funcs->stop)
                adev->gfx.rlc.funcs->stop(adev);

        return 0;
}

static int smu_resume(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        pr_info("SMU is resuming...\n");

        mutex_lock(&smu->mutex);

        ret = smu_smc_table_hw_init(smu, false);
        if (ret)
                goto failed;

        ret = smu_start_thermal_control(smu);
        if (ret)
                goto failed;

        mutex_unlock(&smu->mutex);

        pr_info("SMU is resumed successfully!\n");

        return 0;
failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

int smu_display_configuration_change(struct smu_context *smu,
                                     const struct amd_pp_display_configuration *display_config)
{
        int index = 0;
        int num_of_active_display = 0;

        if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
                return -EINVAL;

        if (!display_config)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        smu_set_deep_sleep_dcefclk(smu,
                                   display_config->min_dcef_deep_sleep_set_clk / 100);

        for (index = 0; index < display_config->num_path_including_non_display; index++) {
                if (display_config->displays[index].controller_id != 0)
                        num_of_active_display++;
        }

        smu_set_active_display_count(smu, num_of_active_display);

        smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
                           display_config->cpu_cc6_disable,
                           display_config->cpu_pstate_disable,
                           display_config->nb_pstate_switch_disable);

        mutex_unlock(&smu->mutex);

        return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
                              struct smu_clock_info *clk_info,
                              enum smu_perf_level_designation designation)
{
        int ret;
        struct smu_performance_level level = {0};

        if (!clk_info)
                return -EINVAL;

        ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
        if (ret)
                return -EINVAL;

        clk_info->min_mem_clk = level.memory_clock;
        clk_info->min_eng_clk = level.core_clock;
        clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

        ret = smu_get_perf_level(smu, designation, &level);
        if (ret)
                return -EINVAL;

        clk_info->min_mem_clk = level.memory_clock;
        clk_info->min_eng_clk = level.core_clock;
        clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

        return 0;
}

int smu_get_current_clocks(struct smu_context *smu,
                           struct amd_pp_clock_info *clocks)
{
        struct amd_pp_simple_clock_info simple_clocks = {0};
        struct smu_clock_info hw_clocks;
        int ret = 0;

        if (!is_support_sw_smu(smu->adev))
                return -EINVAL;

        mutex_lock(&smu->mutex);

        smu_get_dal_power_level(smu, &simple_clocks);

        if (smu->support_power_containment)
                ret = smu_get_clock_info(smu, &hw_clocks,
                                         PERF_LEVEL_POWER_CONTAINMENT);
        else
                ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

        if (ret) {
                pr_err("Error in smu_get_clock_info\n");
                goto failed;
        }

        clocks->min_engine_clock = hw_clocks.min_eng_clk;
        clocks->max_engine_clock = hw_clocks.max_eng_clk;
        clocks->min_memory_clock = hw_clocks.min_mem_clk;
        clocks->max_memory_clock = hw_clocks.max_mem_clk;
        clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
        clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
        clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
        clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

        if (simple_clocks.level == 0)
                clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
        else
                clocks->max_clocks_state = simple_clocks.level;

        if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
                clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
                clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
        }

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

static int smu_set_clockgating_state(void *handle,
                                     enum amd_clockgating_state state)
{
        return 0;
}

static int smu_set_powergating_state(void *handle,
                                     enum amd_powergating_state state)
{
        return 0;
}

static int smu_enable_umd_pstate(void *handle,
                                 enum amd_dpm_forced_level *level)
{
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        struct smu_context *smu = (struct smu_context*)(handle);
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
        if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
                return -EINVAL;

        if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
                /* enter umd pstate, save current level, disable gfx cg */
                if (*level & profile_mode_mask) {
                        smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = true;
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_UNGATE);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg */
                if (!(*level & profile_mode_mask)) {
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = smu_dpm_ctx->saved_dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = false;
                        amdgpu_device_ip_set_clockgating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_CG_STATE_GATE);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_GATE);
                }
        }

        return 0;
}

int smu_adjust_power_state_dynamic(struct smu_context *smu,
                                   enum amd_dpm_forced_level level,
                                   bool skip_display_settings)
{
        int ret = 0;
        int index = 0;
        uint32_t sclk_mask, mclk_mask, soc_mask;
        long workload;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

        if (!smu->pm_enabled)
                return -EINVAL;
        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret) {
                        pr_err("Failed to change display config!\n");
                        return ret;
                }
        }

        if (!smu->pm_enabled)
                return -EINVAL;
        ret = smu_apply_clocks_adjust_rules(smu);
        if (ret) {
                pr_err("Failed to apply clocks adjust rules!\n");
                return ret;
        }

        if (!skip_display_settings) {
                ret = smu_notify_smc_dispaly_config(smu);
                if (ret) {
                        pr_err("Failed to notify smc display config!\n");
                        return ret;
                }
        }

        if (smu_dpm_ctx->dpm_level != level) {
                switch (level) {
                case AMD_DPM_FORCED_LEVEL_HIGH:
                        ret = smu_force_dpm_limit_value(smu, true);
                        break;
                case AMD_DPM_FORCED_LEVEL_LOW:
                        ret = smu_force_dpm_limit_value(smu, false);
                        break;

                case AMD_DPM_FORCED_LEVEL_AUTO:
                        ret = smu_unforce_dpm_levels(smu);
                        break;

                case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
                case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
                case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
                case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
                        ret = smu_get_profiling_clk_mask(smu, level,
                                                         &sclk_mask,
                                                         &mclk_mask,
                                                         &soc_mask);
                        if (ret)
                                return ret;
                        smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
                        smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
                        break;

                case AMD_DPM_FORCED_LEVEL_MANUAL:
                case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
                default:
                        break;
                }

                if (!ret)
                        smu_dpm_ctx->dpm_level = level;
        }

        if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                index = fls(smu->workload_mask);
                index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
                workload = smu->workload_setting[index];

                if (smu->power_profile_mode != workload)
                        smu_set_power_profile_mode(smu, &workload, 0);
        }

        return ret;
}

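/*
 * Note on the workload selection above: fls() returns the 1-based index
 * of the highest set bit, so with workload_mask = 0x9 (BOOTUP_DEFAULT and
 * VIDEO priorities 0 and 3 pending) fls() yields 4 and index 3 selects
 * PP_SMC_POWER_PROFILE_VIDEO, the highest-priority pending profile.
 */
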
int smu_handle_task(struct smu_context *smu,
                    enum amd_dpm_forced_level level,
                    enum amd_pp_task task_id)
{
        int ret = 0;

        switch (task_id) {
        case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
                ret = smu_pre_display_config_changed(smu);
                if (ret)
                        return ret;
                ret = smu_set_cpu_power_state(smu);
                if (ret)
                        return ret;
                ret = smu_adjust_power_state_dynamic(smu, level, false);
                break;
        case AMD_PP_TASK_COMPLETE_INIT:
        case AMD_PP_TASK_READJUST_POWER_STATE:
                ret = smu_adjust_power_state_dynamic(smu, level, true);
                break;
        default:
                break;
        }

        return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
        .name = "smu",
        .early_init = smu_early_init,
        .late_init = smu_late_init,
        .sw_init = smu_sw_init,
        .sw_fini = smu_sw_fini,
        .hw_init = smu_hw_init,
        .hw_fini = smu_hw_fini,
        .suspend = smu_suspend,
        .resume = smu_resume,
        .is_idle = NULL,
        .check_soft_reset = NULL,
        .wait_for_idle = NULL,
        .soft_reset = NULL,
        .set_clockgating_state = smu_set_clockgating_state,
        .set_powergating_state = smu_set_powergating_state,
        .enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 11,
        .minor = 0,
        .rev = 0,
        .funcs = &smu_ip_funcs,
};