/* drivers/gpu/drm/amd/powerplay/amdgpu_smu.c */
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "atom.h"
#include "amd_pcie.h"

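/**
 * smu_get_smc_version - query the driver interface and SMU firmware versions
 * @smu: smu_context pointer
 * @if_version: returned driver interface version, may be NULL if not wanted
 * @smu_version: returned SMU firmware version, may be NULL if not wanted
 *
 * Each version is fetched with its own message and read back from the SMU
 * message argument register.
 */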
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, if_version);
		if (ret)
			return ret;
	}

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, smu_version);
		if (ret)
			return ret;
	}

	return ret;
}

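/*
 * The soft/hard frequency limit messages below pack their payload as
 * (clk_id << 16) | (freq & 0xffff); a bound of 0 means "leave unchanged",
 * and nothing is sent when DPM is disabled for the clock.
 */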
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}

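/**
 * smu_get_dpm_freq_range - query the min/max DPM frequency of a clock
 * @smu: smu_context pointer
 * @clk_type: clock to query
 * @min: returned minimum frequency in MHz, may be NULL
 * @max: returned maximum frequency in MHz, may be NULL
 *
 * When DPM is disabled for the clock, both bounds fall back to the vbios
 * bootup value, which is stored in 10 kHz units (hence the divide by 100).
 */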
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!min && !max)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	mutex_lock(&smu->mutex);
	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}

	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, min);
		if (ret)
			goto failed;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

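/**
 * smu_get_dpm_freq_by_index - get the frequency of a specific DPM level
 * @smu: smu_context pointer
 * @clk_type: clock to query
 * @level: DPM level index; 0xff asks the SMU for the level count instead
 * @value: returned frequency (or level count when level == 0xff)
 */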
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
			      uint16_t level, uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  param);
	if (ret)
		return ret;

	ret = smu_read_smc_arg(smu, &param);
	if (ret)
		return ret;

	/*
	 * BIT31 of the result flags the DPM mode: 0 - fine grained DPM,
	 * 1 - discrete DPM. The flag is not supported yet, so mask it off.
	 */
	*value = param & 0x7fffffff;

	return ret;
}

int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *value)
{
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}

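/*
 * Map a clock type to its controlling DPM feature bit; clock types without
 * a mapping are treated as always enabled.
 */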
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_feature_is_enabled(smu, feature_id)) {
		pr_warn("smu %d clk dpm feature %d is not enabled\n", clk_type, feature_id);
		return false;
	}

	return true;
}

int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		break;
	default:
		break;
	}

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	/* power states are not supported; report the default */
	return POWER_STATE_TYPE_DEFAULT;
}

int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; expose a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

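/*
 * smu_update_table copies a driver table to or from SMU-visible DRAM: the
 * driver first publishes the table's MC address via SetDriverDramAddrHigh/
 * SetDriverDramAddrLow, then asks the SMU to move it with
 * TransferTableDram2Smu (drv2smu) or TransferTableSmu2Dram.
 */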
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
		     void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;
	int ret = 0;
	int table_id = smu_table_get_index(smu, table_index);

	if (!table_data || table_id < 0 || table_id >= smu_table->table_count)
		return -EINVAL;

	table = &smu_table->tables[table_index];

	if (drv2smu)
		memcpy(table->cpu_addr, table_data, table->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id | ((argument & 0xFFFF) << 16));
	if (ret)
		return ret;

	if (!drv2smu)
		memcpy(table_data, table->cpu_addr, table->size);

	return ret;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20)
		return amdgpu_dpm == 2;
	else if (adev->asic_type >= CHIP_NAVI10)
		return true;
	else
		return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EINVAL;
	if (header->usStructureSize != size) {
		pr_err("pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;
	mutex_unlock(&smu->mutex);

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	return ret;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

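/*
 * Rebuild the driver's allowed-feature bitmap from the ASIC-specific
 * allowed feature mask; the bitmap is cleared first so a repeated init
 * starts from a clean state.
 */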
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;
	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
			    bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = smu_feature_update_enable_state(smu, feature_id, enable);
	if (ret)
		goto failed;

	if (enable)
		test_and_set_bit(feature_id, feature->enabled);
	else
		test_and_clear_bit(feature_id, feature->enabled);

failed:
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_NAVI10:
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v11_0_set_smu_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;
	mutex_lock(&smu->mutex);
	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);
	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}

static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to init smu_initialize_pptable!\n");
		return ret;
	}

	/*
	 * Create the smu_table structure and init SMC tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure and allocate the
	 * smu_dpm_context that backs the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu_init_power!\n");
		return ret;
	}

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		pr_err("Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	kfree(smu->irq_source);
	smu->irq_source = NULL;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to smu_fini_power!\n");
		return ret;
	}

	return 0;
}

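/*
 * Back every non-empty entry in smu_table->tables with a kernel BO so the
 * SMU can DMA table contents; on any allocation failure the entries
 * created so far are freed again.
 */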
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;
	int32_t ret = 0;

	if (table_count <= 0)
		return -EINVAL;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		ret = amdgpu_bo_create_kernel(adev,
					      tables[i].size,
					      tables[i].align,
					      tables[i].domain,
					      &tables[i].bo,
					      &tables[i].mc_address,
					      &tables[i].cpu_addr);
		if (ret)
			goto failed;
	}

	return 0;
failed:
	/* free only the entries that were successfully created */
	while (i--) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}
	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;

	if (table_count == 0 || tables == NULL)
		return 0;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}

	return 0;
}

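/*
 * Cap the SMU's PCIe link DPM at what the platform actually supports: the
 * strongest supported gen and lane width are looked up from the CAIL masks
 * and packed into a single OverridePcieParameters payload.
 */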
static int smu_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_OverridePcieParameters,
					  smu_pcie_arg);
	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
	return ret;
}

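/*
 * Bring the SMC tables on the ASIC up (or back up on resume). With
 * initialize == true the boot path also parses the vbios/pptable and
 * allocates the backing vram BOs; the resume path skips those one-time
 * steps and only replays the hardware programming.
 */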
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret)
		return ret;

	if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		ret = smu_get_clk_info_from_vbios(smu);
		if (ret)
			return ret;

		/*
		 * Check that the format_revision in vbios matches the pptable
		 * header version and that the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse the pptable format and fill PPTable_t smc_pptable in
		 * the smu_table_context structure, then read the smc_dpm_table
		 * from vbios and fill it into smc_pptable as well.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send the GetDriverIfVersion message and check that the
		 * returned value matches DRIVER_IF_VERSION in the smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/*
	 * Copy the pptable bo in vram to the smc with SMU messages such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue RunAfllBtc msg */
	ret = smu_run_afll_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	ret = smu_override_pcie_parameters(smu);
	if (ret)
		return ret;

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	/*
	 * Set initialized values (from vbios) in the dpm tables context, such
	 * as gfxclk, memclk, and dcefclk, and enable the DPM feature for each
	 * type of clock.
	 */
	if (initialize) {
		ret = smu_populate_smc_pptable(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_default_od_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
		if (ret)
			return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr MSG
	 * for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used by the SMC; the SetSystemVirtualDramAddr and
 * DramLogSetDramAddr messages notify the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ret = smu_check_fw_status(smu);
		if (ret) {
			pr_err("SMC firmware status is not correct\n");
			return ret;
		}
	}

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
	 * notify the SMU of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	if (!smu->pm_enabled)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;	/* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	return ret;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}

static int smu_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);

	ret = smu_system_features_control(smu, false);
	if (ret)
		return ret;

	if (adev->in_gpu_reset && baco_feature_is_enabled) {
		ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
		if (ret) {
			pr_warn("set BACO feature enabled failed, return %d\n", ret);
			return ret;
		}
	}

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	pr_info("SMU is resuming...\n");

	mutex_lock(&smu->mutex);

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is resumed successfully!\n");

	return 0;
failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_deep_sleep_dcefclk(smu,
				   display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}

int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

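/*
 * Entering a profiling (UMD pstate) level ungates GFX clock/power gating so
 * performance counters read sensibly; leaving it restores the saved level
 * and re-enables gating.
 */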
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}

static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu_force_dpm_limit_value(smu, true);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu_force_dpm_limit_value(smu, false);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		ret = smu_unforce_dpm_levels(smu);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = smu_get_profiling_clk_mask(smu, level,
						 &sclk_mask,
						 &mclk_mask,
						 &soc_mask);
		if (ret)
			return ret;
		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}
	return ret;
}

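/*
 * Core power-state worker: optionally pushes display config changes to the
 * SMU, applies the clock adjust rules, switches the DPM forced level
 * (falling back to the default handler if the ASIC-specific one fails),
 * and finally re-applies the highest-priority workload power profile.
 */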
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled)
		return -EINVAL;
	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_dispaly_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret)
			ret = smu_default_set_performance_level(smu, level);
		if (!ret)
			smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}

int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id)
{
	int ret = 0;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}

enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}

int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	int ret = 0;
	int i;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu_dpm_ctx->dpm_context)
		return -EINVAL;

	for (i = 0; i < smu->adev->num_ip_blocks; i++) {
		if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
			break;
	}

	smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE);
	if (ret)
		return ret;

	mutex_lock(&smu->mutex);
	smu_dpm_ctx->dpm_level = level;
	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};