drm/amd/powerplay: implement dpm enable functions of uvd & vce for smu

drivers/gpu/drm/amd/powerplay/amdgpu_smu.c

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "atom.h"

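/*
 * smu_dpm_set_power_gate - power gate/ungate DPM for a multimedia IP block
 *
 * Dispatches on @block_type: UVD and VCE are routed to their
 * smu_dpm_set_*_enable() handlers; other IP blocks are silently ignored.
 */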
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, gate);
		break;
	default:
		break;
	}

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	/* power states are not supported yet */
	return POWER_STATE_TYPE_DEFAULT;
}

int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported yet */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 0;

	return 0;
}

int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

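/*
 * smu_update_table - copy a driver table to or from the SMC
 *
 * The table's DRAM address is handed to the SMC via the
 * SetDriverDramAddrHigh/Low messages, then the transfer direction is
 * selected with TransferTableDram2Smu (@drv2smu == true) or
 * TransferTableSmu2Dram (@drv2smu == false).
 */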
int smu_update_table(struct smu_context *smu, uint32_t table_id,
		     void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;
	int ret = 0;

	if (!table_data || table_id >= smu_table->table_count)
		return -EINVAL;

	table = &smu_table->tables[table_id];

	if (drv2smu)
		memcpy(table->cpu_addr, table_data, table->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id);
	if (ret)
		return ret;

	if (!drv2smu)
		memcpy(table_data, table->cpu_addr, table->size);

	return ret;
}

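/*
 * is_support_sw_smu - check whether the ASIC uses the software SMU path
 *
 * The SW SMU implementation is used only when DPM is enabled
 * (amdgpu_dpm == 1) and the ASIC is Vega20 or newer.
 */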
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (amdgpu_dpm != 1)
		return false;

	if (adev->asic_type >= CHIP_VEGA20)
		return true;

	return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (header->usStructureSize != size) {
		pr_err("pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;
	mutex_unlock(&smu->mutex);

	/* smu_reset() re-takes smu->mutex via smu_hw_fini/init, so return here */
	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	return ret;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

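/*
 * smu_feature_init_dpm - compute the allowed DPM feature bitmap
 *
 * Starts from an all-ones allowed mask and clears the bits reported by
 * smu_get_unallowed_feature_mask(); all bitmap updates are made under
 * feature->mutex.
 */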
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];

	mutex_lock(&feature->mutex);
	bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_andnot(feature->allowed, feature->allowed,
		      (unsigned long *)unallowed_feature_mask,
		      feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}

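/*
 * The supported/enabled feature bitmaps below are always read and written
 * under feature->mutex; callers pass a feature_id that must be a valid
 * bit index (0 .. feature->feature_num - 1).
 */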
int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = smu_feature_update_enable_state(smu, feature_id, enable);
	if (ret)
		goto failed;

	if (enable)
		test_and_set_bit(feature_id, feature->enabled);
	else
		test_and_clear_bit(feature_id, feature->enabled);

failed:
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_is_supported(struct smu_context *smu, int feature_id)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_supported(struct smu_context *smu, int feature_id,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		smu_v11_0_set_smu_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}

static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to initialize pptable!\n");
		return ret;
	}

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context with its context size to fill the
	 * smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu power!\n");
		return ret;
	}

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

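/*
 * smu_sw_init - software-side setup for the SMU IP block
 *
 * Zeroes the feature bitmaps, seeds the default workload priority and
 * setting tables, and builds the microcode image and SMC tables.
 */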
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to fini smu power!\n");
		return ret;
	}

	return 0;
}

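/*
 * smu_init_fb_allocations - back each SMC table with a kernel buffer object
 *
 * Allocates one BO per non-empty table; on failure, every BO created so
 * far is freed again before returning the error.
 */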
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;
	int ret = 0;

	if (table_count == 0)
		return -EINVAL;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		ret = amdgpu_bo_create_kernel(adev,
					      tables[i].size,
					      tables[i].align,
					      tables[i].domain,
					      &tables[i].bo,
					      &tables[i].mc_address,
					      &tables[i].cpu_addr);
		if (ret)
			goto failed;
	}

	return 0;
failed:
	/* free only the BOs actually created, i.e. indices below i */
	while (i--) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}
	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;

	if (table_count == 0 || tables == NULL)
		return 0;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}

	return 0;
}

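/*
 * smu_smc_table_hw_init - bring up the SMC table state on the hardware
 *
 * Walks the init sequence: read and check the pptable from vbios,
 * allocate the table BOs, upload the pptable to the SMC, enable the
 * allowed features, and program the initial clock and power settings.
 */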
static int smu_smc_table_hw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_init_display(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_read_pptable_from_vbios(smu);
	if (ret)
		return ret;

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret)
		return ret;

	ret = smu_get_clk_info_from_vbios(smu);
	if (ret)
		return ret;

	/*
	 * check if the format_revision in vbios matches the pptable header
	 * version, and that the structure size is not 0.
	 */
	ret = smu_check_pptable(smu);
	if (ret)
		return ret;

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	/*
	 * Parse the pptable format and fill the PPTable_t smc_pptable in
	 * the smu_table_context structure. Then read the smc_dpm_table
	 * from vbios and fill it into smc_pptable as well.
	 */
	ret = smu_parse_pptable(smu);
	if (ret)
		return ret;

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * to the DRIVER_IF_VERSION of the smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	/*
	 * Copy the pptable bo in vram to the smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue RunAfllBtc msg */
	ret = smu_run_afll_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_enable_all(smu);
	if (ret)
		return ret;

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	/*
	 * Set initialized values (from vbios) to the dpm tables context,
	 * such as gfxclk, memclk, dcefclk, etc. Then enable the DPM
	 * feature for each type of clks.
	 */
	ret = smu_populate_smc_pptable(smu);
	if (ret)
		return ret;

	ret = smu_init_max_sustainable_clocks(smu);
	if (ret)
		return ret;

	ret = smu_set_od8_default_settings(smu);
	if (ret)
		return ret;

	ret = smu_populate_umd_state_clk(smu);
	if (ret)
		return ret;

	ret = smu_get_power_limit(smu);
	if (ret)
		return ret;

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	return ret;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used by the SMC; its address is handed over with
 * the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return ret;
}
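
/*
 * smu_hw_init - hardware bring-up for the SMU IP block
 *
 * Loads and verifies the SMC firmware when not using PSP front-door
 * loading, initializes the DPM feature bitmaps, programs the SMC tables,
 * sets up the memory pool, and starts thermal control.
 */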
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		ret = smu_load_microcode(smu);
		if (ret)
			return ret;
	}

	ret = smu_check_fw_status(smu);
	if (ret) {
		pr_err("SMC firmware status is not correct\n");
		return ret;
	}

	mutex_lock(&smu->mutex);

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use msgs SetSystemVirtualDramAddr and DramLogSetDramAddr to notify
	 * the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->od_feature_capabilities);
	table_context->od_feature_capabilities = NULL;

	kfree(table_context->od_settings_max);
	table_context->od_settings_max = NULL;

	kfree(table_context->od_settings_min);
	table_context->od_settings_min = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	kfree(table_context->od8_settings);
	table_context->od8_settings = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	return 0;
}

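/*
 * smu_resume - restore SMC state after suspend
 *
 * Reloads and verifies the firmware when not using PSP loading, then
 * rewrites the tool table location, pptable, and watermarks, restores
 * the last deep-sleep dcefclk, and re-enables the system features.
 */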
static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))
		return -EINVAL;

	pr_info("SMU is resuming...\n");

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		ret = smu_load_microcode(smu);
		if (ret)
			return ret;
	}

	ret = smu_check_fw_status(smu);
	if (ret) {
		pr_err("SMC firmware status is not correct\n");
		return ret;
	}

	mutex_lock(&smu->mutex);

	ret = smu_set_tool_table_location(smu);
	if (ret)
		goto failed;

	ret = smu_write_pptable(smu);
	if (ret)
		goto failed;

	ret = smu_write_watermarks_table(smu);
	if (ret)
		goto failed;

	ret = smu_set_last_dcef_min_deep_sleep_clk(smu);
	if (ret)
		goto failed;

	ret = smu_system_features_control(smu, true);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is resumed successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

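/*
 * smu_display_configuration_change - push a new display config to the SMU
 *
 * Updates the deep-sleep dcefclk, counts the active displays (non-zero
 * controller_id), and stores the cc6 data, all under smu->mutex.
 */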
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_deep_sleep_dcefclk(smu,
				   display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}

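/*
 * smu_get_clock_info - fill a smu_clock_info from a performance level
 *
 * Queries the performance level for @designation and copies its memory
 * clock, core clock, and non-local memory bandwidth into @clk_info.
 */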
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}

int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

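/*
 * smu_enable_umd_pstate - enter or leave the UMD pstate profile levels
 *
 * On entry into a profile mode the current DPM level is saved and gfx
 * clockgating/powergating are ungated; on exit the saved level is
 * restored and gating is re-enabled.
 */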
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = NULL,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};
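
/*
 * Usage sketch (an assumption, not part of this file): SoC init code is
 * expected to register this IP block when the SW SMU path applies, e.g.:
 *
 *	if (is_support_sw_smu(adev))
 *		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 */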