drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "atom.h"
#include "amd_pcie.h"

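/*
 * Gate or ungate the DPM state of a given IP block; only UVD and VCE are
 * handled here, other block types are silently ignored.
 */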
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, gate);
		break;
	default:
		break;
	}

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	/* power states are not supported */
	return POWER_STATE_TYPE_DEFAULT;
}

int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 0;

	return 0;
}

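/*
 * Handle the sensor requests that the common code can satisfy directly:
 * the stable pstate clocks cached in the smu context and the enabled SMC
 * feature mask. Any other sensor returns -EINVAL.
 */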
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

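/*
 * Transfer a driver table between its DRAM buffer object and SMU-internal
 * memory. The table's DRAM address is passed to the SMU with the
 * SetDriverDramAddrHigh/Low messages, then TransferTableDram2Smu or
 * TransferTableSmu2Dram moves the data in the direction selected by drv2smu.
 * The extra argument is packed into the upper 16 bits of the transfer
 * message parameter.
 */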
int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
			      void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;
	int ret = 0;
	uint32_t table_index;

	if (!table_data || table_id >= smu_table->table_count)
		return -EINVAL;

	table_index = (exarg << 16) | table_id;

	table = &smu_table->tables[table_id];

	if (drv2smu)
		memcpy(table->cpu_addr, table_data, table->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_index);
	if (ret)
		return ret;

	if (!drv2smu)
		memcpy(table_data, table->cpu_addr, table->size);

	return ret;
}

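/*
 * Report whether the new software SMU path should drive this device:
 * amdgpu_dpm must be exactly 1 and the ASIC must be Vega20 or newer,
 * excluding Raven.
 */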
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (amdgpu_dpm != 1)
		return false;

	if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN)
		return true;

	return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (header->usStructureSize != size) {
		pr_err("pp table size not matched !\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;
	mutex_unlock(&smu->mutex);

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	return ret;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

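/*
 * Seed the allowed-feature bitmap: start with every feature allowed, then
 * clear the bits that the ASIC-specific code reports as unallowed.
 */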
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];

	mutex_lock(&feature->mutex);
	bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_andnot(feature->allowed, feature->allowed,
		      (unsigned long *)unallowed_feature_mask,
		      feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}

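/*
 * Helpers to query and update the per-feature enabled/supported bitmaps;
 * every access below is serialized on feature->mutex.
 */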
int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = smu_feature_update_enable_state(smu, feature_id, enable);
	if (ret)
		goto failed;

	if (enable)
		test_and_set_bit(feature_id, feature->enabled);
	else
		test_and_clear_bit(feature_id, feature->enabled);

failed:
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_is_supported(struct smu_context *smu, int feature_id)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_supported(struct smu_context *smu, int feature_id,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v11_0_set_smu_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	mutex_lock(&smu->mutex);
	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);
	mutex_unlock(&smu->mutex);

	return 0;
}

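/*
 * Look up an ATOM data table in the VBIOS and return its size, format
 * revision, content revision and a pointer into the BIOS image.
 */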
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}

static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to init smu_initialize_pptable!\n");
		return ret;
	}

	/*
	 * Create the smu_table structure and init the smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure and allocate the
	 * smu_dpm_context needed to fill in the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu_init_power!\n");
		return ret;
	}

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

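/*
 * Software-side initialization: set up default workload priorities and
 * settings, feature bitmaps and DPM levels, then load the SMU firmware and
 * create the SMC table structures.
 */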
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to fini smu power!\n");
		return ret;
	}

	return 0;
}

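/*
 * Back each non-empty SMC table with a kernel buffer object so its contents
 * can be exchanged with the SMU; unwind the allocations already made if one
 * of them fails.
 */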
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;
	int32_t ret = 0;

	if (table_count <= 0)
		return -EINVAL;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		ret = amdgpu_bo_create_kernel(adev,
					      tables[i].size,
					      tables[i].align,
					      tables[i].domain,
					      &tables[i].bo,
					      &tables[i].mc_address,
					      &tables[i].cpu_addr);
		if (ret)
			goto failed;
	}

	return 0;
failed:
	for (; i > 0; i--) {
		if (tables[i - 1].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i - 1].bo,
				      &tables[i - 1].mc_address,
				      &tables[i - 1].cpu_addr);
	}
	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;

	if (table_count == 0 || tables == NULL)
		return 0;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}

	return 0;
}

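/*
 * Report the highest PCIe generation and link width supported by the
 * platform to the SMU via the OverridePcieParameters message.
 */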
static int smu_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_OverridePcieParameters,
					  smu_pcie_arg);
	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
	return ret;
}

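/*
 * Bring the SMC tables up on the hardware. On first init this also parses
 * the pptable from the VBIOS, allocates the backing buffer objects and
 * populates the dpm tables; on resume (initialize == false) only the steps
 * needed to re-upload state to the SMU are repeated.
 */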
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display(smu);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_read_pptable_from_vbios(smu);
		if (ret)
			return ret;

		/* get boot_values from vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_get_clk_info_from_vbios(smu);
		if (ret)
			return ret;

		/*
		 * check if the format_revision in vbios is up to pptable header
		 * version, and the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse the pptable format and fill PPTable_t smc_pptable in
		 * the smu_table_context structure. Then read the smc_dpm_table
		 * from vbios and fill it into smc_pptable as well.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send msg GetDriverIfVersion to check if the return value
		 * matches the DRIVER_IF_VERSION of the smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/*
	 * Copy the pptable bo in vram to the smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue RunAfllBtc msg */
	ret = smu_run_afll_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	ret = smu_override_pcie_parameters(smu);
	if (ret)
		return ret;

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	/*
	 * Set initialized values (from vbios) to the dpm tables context such
	 * as gfxclk, memclk, dcefclk, etc., and enable the DPM feature for
	 * each type of clock.
	 */
	if (initialize) {
		ret = smu_populate_smc_pptable(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_od8_default_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
		if (ret)
			return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	return ret;
}

/**
 * smu_alloc_memory_pool - allocate a memory pool in system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used by the SMC; its location is reported to the SMU
 * with the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return ret;
}

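/*
 * Hardware init: load and verify the SMC firmware, set up the DPM feature
 * masks, initialize the SMC tables on the hardware, allocate and advertise
 * the memory pool, and start thermal control.
 */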
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		ret = smu_load_microcode(smu);
		if (ret)
			return ret;
	}

	ret = smu_check_fw_status(smu);
	if (ret) {
		pr_err("SMC firmware status is not correct\n");
		return ret;
	}

	mutex_lock(&smu->mutex);

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
	 * notify the SMU of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	adev->pm.dpm_enabled = true;

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->od_feature_capabilities);
	table_context->od_feature_capabilities = NULL;

	kfree(table_context->od_settings_max);
	table_context->od_settings_max = NULL;

	kfree(table_context->od_settings_min);
	table_context->od_settings_min = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	kfree(table_context->od8_settings);
	table_context->od8_settings = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}

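/*
 * Full reset of the SMU block: tear the hardware state down, then bring it
 * back up again.
 */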
int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}

static int smu_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	ret = smu_system_features_control(smu, false);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	pr_info("SMU is resuming...\n");

	mutex_lock(&smu->mutex);

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is resumed successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

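/*
 * Apply a new display configuration from the display core: update the deep
 * sleep DCEFCLK, the active display count, and the stored CC6 data.
 */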
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_deep_sleep_dcefclk(smu,
				   display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}

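/*
 * Fill an amd_pp_clock_info structure for the display code from the current
 * hardware clock limits, using the power-containment level when power
 * containment is supported.
 */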
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

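/*
 * Entering a UMD pstate (one of the profiling levels) saves the current DPM
 * level and ungates GFX clock/power gating; leaving it restores the saved
 * level and re-enables gating.
 */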
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}

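/*
 * Re-evaluate the DPM state after a display or task event: apply the clock
 * adjust rules, force or unforce levels as requested, and switch the
 * workload profile if the highest-priority pending workload differs from
 * the current one.
 */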
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_dispaly_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		switch (level) {
		case AMD_DPM_FORCED_LEVEL_HIGH:
			ret = smu_force_dpm_limit_value(smu, true);
			break;
		case AMD_DPM_FORCED_LEVEL_LOW:
			ret = smu_force_dpm_limit_value(smu, false);
			break;

		case AMD_DPM_FORCED_LEVEL_AUTO:
			ret = smu_unforce_dpm_levels(smu);
			break;

		case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
			ret = smu_get_profiling_clk_mask(smu, level,
							 &sclk_mask,
							 &mclk_mask,
							 &soc_mask);
			if (ret)
				return ret;
			smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
			smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
			break;

		case AMD_DPM_FORCED_LEVEL_MANUAL:
		case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		default:
			break;
		}

		if (!ret)
			smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}

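/*
 * Dispatch a powerplay task: display config changes additionally run the
 * pre-display-config hook and the CPU power state update before the dynamic
 * power state adjustment.
 */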
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id)
{
	int ret = 0;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};
1253};