Commit | Line | Data |
---|---|---|
137d63ab HR |
1 | /* |
2 | * Copyright 2019 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | */ | |
22 | ||
23 | #include "pp_debug.h" | |
24 | #include <linux/firmware.h> | |
25 | #include <drm/drmP.h> | |
26 | #include "amdgpu.h" | |
27 | #include "amdgpu_smu.h" | |
28 | #include "soc15_common.h" | |
07845526 | 29 | #include "smu_v11_0.h" |
e15da5a4 | 30 | #include "atom.h" |
137d63ab | 31 | |
6b816d73 KW |
/*
 * smu_feature_init_dpm - build the allowed-feature bitmap for DPM
 *
 * Starts from "all features allowed", asks the ASIC backend for the
 * unallowed-feature words, and clears those bits from @smu's allowed map.
 *
 * Returns 0 on success, or the backend's negative error code.
 */
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	/* one 32-bit word per 32 features, filled by the backend */
	uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];

	bitmap_fill(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	/*
	 * allowed &= ~unallowed. NOTE(review): casting the uint32_t array to
	 * unsigned long * relies on little-endian word layout — verify on
	 * big-endian targets.
	 */
	bitmap_andnot(feature->allowed, feature->allowed,
		      (unsigned long *)unallowed_feature_mask,
		      feature->feature_num);

	return ret;
}
51 | ||
2f25158d KW |
52 | int smu_feature_is_enabled(struct smu_context *smu, int feature_id) |
53 | { | |
54 | struct smu_feature *feature = &smu->smu_feature; | |
55 | WARN_ON(feature_id > feature->feature_num); | |
56 | return test_bit(feature_id, feature->enabled); | |
57 | } | |
58 | ||
59 | int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable) | |
60 | { | |
61 | struct smu_feature *feature = &smu->smu_feature; | |
62 | WARN_ON(feature_id > feature->feature_num); | |
63 | if (enable) | |
64 | test_and_set_bit(feature_id, feature->enabled); | |
65 | else | |
66 | test_and_clear_bit(feature_id, feature->enabled); | |
67 | return 0; | |
68 | } | |
69 | ||
70 | int smu_feature_is_supported(struct smu_context *smu, int feature_id) | |
71 | { | |
72 | struct smu_feature *feature = &smu->smu_feature; | |
73 | WARN_ON(feature_id > feature->feature_num); | |
74 | return test_bit(feature_id, feature->supported); | |
75 | } | |
76 | ||
77 | int smu_feature_set_supported(struct smu_context *smu, int feature_id, | |
78 | bool enable) | |
79 | { | |
80 | struct smu_feature *feature = &smu->smu_feature; | |
81 | WARN_ON(feature_id > feature->feature_num); | |
82 | if (enable) | |
83 | test_and_set_bit(feature_id, feature->supported); | |
84 | else | |
85 | test_and_clear_bit(feature_id, feature->supported); | |
86 | return 0; | |
87 | } | |
88 | ||
137d63ab HR |
89 | static int smu_set_funcs(struct amdgpu_device *adev) |
90 | { | |
07845526 HR |
91 | struct smu_context *smu = &adev->smu; |
92 | ||
93 | switch (adev->asic_type) { | |
94 | case CHIP_VEGA20: | |
95 | smu_v11_0_set_smu_funcs(smu); | |
96 | break; | |
97 | default: | |
98 | return -EINVAL; | |
99 | } | |
100 | ||
137d63ab HR |
101 | return 0; |
102 | } | |
103 | ||
104 | static int smu_early_init(void *handle) | |
105 | { | |
106 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
107 | struct smu_context *smu = &adev->smu; | |
137d63ab HR |
108 | |
109 | smu->adev = adev; | |
110 | mutex_init(&smu->mutex); | |
111 | ||
74e07f9d | 112 | return smu_set_funcs(adev); |
137d63ab HR |
113 | } |
114 | ||
e15da5a4 HR |
115 | int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, |
116 | uint16_t *size, uint8_t *frev, uint8_t *crev, | |
117 | uint8_t **addr) | |
118 | { | |
119 | struct amdgpu_device *adev = smu->adev; | |
120 | uint16_t data_start; | |
121 | ||
122 | if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table, | |
123 | size, frev, crev, &data_start)) | |
124 | return -EINVAL; | |
125 | ||
126 | *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start; | |
127 | ||
128 | return 0; | |
129 | } | |
130 | ||
b5624000 HR |
/* Placeholder: pptable initialization is not implemented yet; always 0. */
static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}
136 | ||
/*
 * smu_smc_table_sw_init - software-side setup of SMC table structures
 *
 * Ordered sequence: pptable init, SMC table allocation, then power context
 * allocation. Stops and returns at the first failing step.
 */
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to init smu_initialize_pptable!\n");
		return ret;
	}

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu_init_power!\n");
		return ret;
	}

	return 0;
}
169 | ||
813ce279 KW |
/*
 * smu_smc_table_sw_fini - release the software-side SMC table structures
 *
 * Thin wrapper around smu_fini_smc_tables() that logs on failure.
 */
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret = smu_fini_smc_tables(smu);

	if (ret)
		pr_err("Failed to smu_fini_smc_tables!\n");

	return ret;
}
182 | ||
137d63ab HR |
/*
 * smu_sw_init - amdgpu IP callback: software init for the SMU block
 *
 * Seeds the feature bitmaps, loads the SMU firmware image and allocates
 * the software-side SMC table structures. Only ASICs >= Vega20 are
 * supported.
 */
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (adev->asic_type < CHIP_VEGA20)
		return -EINVAL;

	/* memory-pool size comes from the module/board private buffer size */
	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	/* start with every feature bitmap cleared; hw_init fills them in */
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	return 0;
}
212 | ||
213 | static int smu_sw_fini(void *handle) | |
214 | { | |
215 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
813ce279 KW |
216 | struct smu_context *smu = &adev->smu; |
217 | int ret; | |
137d63ab HR |
218 | |
219 | if (adev->asic_type < CHIP_VEGA20) | |
220 | return -EINVAL; | |
221 | ||
813ce279 KW |
222 | ret = smu_smc_table_sw_fini(smu); |
223 | if (ret) { | |
224 | pr_err("Failed to sw fini smc table!\n"); | |
225 | return ret; | |
226 | } | |
227 | ||
8bf16963 KW |
228 | ret = smu_fini_power(smu); |
229 | if (ret) { | |
230 | pr_err("Failed to init smu_fini_power!\n"); | |
231 | return ret; | |
232 | } | |
233 | ||
137d63ab HR |
234 | return 0; |
235 | } | |
236 | ||
9c9a1747 HR |
237 | static int smu_init_fb_allocations(struct smu_context *smu) |
238 | { | |
f96357a9 KW |
239 | struct amdgpu_device *adev = smu->adev; |
240 | struct smu_table_context *smu_table = &smu->smu_table; | |
241 | struct smu_table *tables = smu_table->tables; | |
242 | uint32_t table_count = smu_table->table_count; | |
243 | uint32_t i = 0; | |
244 | int32_t ret = 0; | |
245 | ||
246 | if (table_count <= 0) | |
247 | return -EINVAL; | |
248 | ||
249 | for (i = 0 ; i < table_count; i++) { | |
250 | if (tables[i].size == 0) | |
251 | continue; | |
252 | ret = amdgpu_bo_create_kernel(adev, | |
253 | tables[i].size, | |
254 | tables[i].align, | |
255 | tables[i].domain, | |
256 | &tables[i].bo, | |
257 | &tables[i].mc_address, | |
258 | &tables[i].cpu_addr); | |
259 | if (ret) | |
260 | goto failed; | |
261 | } | |
262 | ||
9c9a1747 | 263 | return 0; |
f96357a9 KW |
264 | failed: |
265 | for (; i > 0; i--) { | |
266 | if (tables[i].size == 0) | |
267 | continue; | |
268 | amdgpu_bo_free_kernel(&tables[i].bo, | |
269 | &tables[i].mc_address, | |
270 | &tables[i].cpu_addr); | |
271 | ||
272 | } | |
273 | return ret; | |
9c9a1747 HR |
274 | } |
275 | ||
f96357a9 KW |
276 | static int smu_fini_fb_allocations(struct smu_context *smu) |
277 | { | |
278 | struct smu_table_context *smu_table = &smu->smu_table; | |
279 | struct smu_table *tables = smu_table->tables; | |
280 | uint32_t table_count = smu_table->table_count; | |
281 | uint32_t i = 0; | |
282 | ||
283 | if (table_count == 0 || tables == NULL) | |
284 | return -EINVAL; | |
285 | ||
286 | for (i = 0 ; i < table_count; i++) { | |
287 | if (tables[i].size == 0) | |
288 | continue; | |
289 | amdgpu_bo_free_kernel(&tables[i].bo, | |
290 | &tables[i].mc_address, | |
291 | &tables[i].cpu_addr); | |
292 | } | |
293 | ||
294 | return 0; | |
295 | } | |
f6a6b952 | 296 | |
05cadcd3 HR |
/*
 * smu_smc_table_hw_init - bring the SMC tables up on the hardware
 *
 * Ordered hardware init sequence: display init, allowed-feature mask,
 * pptable retrieval and validation from vbios, vram table allocation,
 * pptable parse/upload, feature enablement and initial clock/limit setup.
 * Returns 0 on success or the first failing step's error code.
 */
static int smu_smc_table_hw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_init_display(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_read_pptable_from_vbios(smu);
	if (ret)
		return ret;

	/* get boot_values from vbios to set revision, gfxclk, and etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret)
		return ret;

	/* (was called twice back-to-back; the duplicate call is removed) */
	ret = smu_get_clk_info_from_vbios(smu);
	if (ret)
		return ret;

	/*
	 * check if the format_revision in vbios is up to pptable header
	 * version, and the structure size is not 0.
	 */
	ret = smu_check_pptable(smu);
	if (ret)
		return ret;

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	/*
	 * Parse pptable format and fill PPTable_t smc_pptable to
	 * smu_table_context structure. And read the smc_dpm_table from vbios,
	 * then fill it into smc_pptable.
	 */
	ret = smu_parse_pptable(smu);
	if (ret)
		return ret;

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * with DRIVER_IF_VERSION of smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	/*
	 * Copy pptable bo in the vram to smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue RunAfllBtc msg */
	ret = smu_run_afll_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_enable_all(smu);
	if (ret)
		return ret;

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	/*
	 * Set initialized values (get from vbios) to dpm tables context such as
	 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
	 * type of clks.
	 */
	ret = smu_populate_smc_pptable(smu);
	if (ret)
		return ret;

	ret = smu_init_max_sustainable_clocks(smu);
	if (ret)
		return ret;

	ret = smu_populate_umd_state_clk(smu);
	if (ret)
		return ret;

	ret = smu_get_power_limit(smu);
	if (ret)
		return ret;

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	return ret;
}
415 | ||
e65d45f2 HR |
/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
 * and DramLogSetDramAddr can notify it changed.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	/* a zero-sized pool means the feature is disabled; nothing to do */
	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	/* only the discrete supported sizes get a BO; others fall through */
	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}
460 | ||
0b51d993 KW |
461 | static int smu_free_memory_pool(struct smu_context *smu) |
462 | { | |
463 | struct smu_table_context *smu_table = &smu->smu_table; | |
464 | struct smu_table *memory_pool = &smu_table->memory_pool; | |
465 | int ret = 0; | |
466 | ||
467 | if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO) | |
468 | return ret; | |
469 | ||
470 | amdgpu_bo_free_kernel(&memory_pool->bo, | |
471 | &memory_pool->mc_address, | |
472 | &memory_pool->cpu_addr); | |
473 | ||
474 | memset(memory_pool, 0, sizeof(struct smu_table)); | |
475 | ||
476 | return ret; | |
477 | } | |
137d63ab HR |
/*
 * smu_hw_init - amdgpu IP callback: hardware init for the SMU block
 *
 * Loads/validates SMC firmware, then under smu->mutex runs the DPM
 * feature init, the SMC table hardware init sequence and the memory-pool
 * setup. Returns 0 on success or the first failing step's error code.
 */
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (adev->asic_type < CHIP_VEGA20)
		return -EINVAL;

	/* when PSP does not load the firmware, the driver loads it directly */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		ret = smu_load_microcode(smu);
		if (ret)
			return ret;
	}

	ret = smu_check_fw_status(smu);
	if (ret) {
		pr_err("SMC firmware status is not correct\n");
		return ret;
	}

	mutex_lock(&smu->mutex);

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
	 * pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
531 | ||
532 | static int smu_hw_fini(void *handle) | |
533 | { | |
534 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
535 | struct smu_context *smu = &adev->smu; | |
afba8282 | 536 | struct smu_table_context *table_context = &smu->smu_table; |
f96357a9 | 537 | int ret = 0; |
137d63ab HR |
538 | |
539 | if (adev->asic_type < CHIP_VEGA20) | |
540 | return -EINVAL; | |
541 | ||
afba8282 LG |
542 | if (!table_context->driver_pptable) |
543 | return -EINVAL; | |
544 | kfree(table_context->driver_pptable); | |
545 | ||
7457cf02 HR |
546 | if (table_context->max_sustainable_clocks) { |
547 | kfree(table_context->max_sustainable_clocks); | |
548 | table_context->max_sustainable_clocks = NULL; | |
549 | } | |
550 | ||
f96357a9 KW |
551 | ret = smu_fini_fb_allocations(smu); |
552 | if (ret) | |
553 | return ret; | |
554 | ||
0b51d993 KW |
555 | ret = smu_free_memory_pool(smu); |
556 | if (ret) | |
557 | return ret; | |
558 | ||
137d63ab HR |
559 | return 0; |
560 | } | |
561 | ||
562 | static int smu_suspend(void *handle) | |
563 | { | |
564 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
565 | ||
566 | if (adev->asic_type < CHIP_VEGA20) | |
567 | return -EINVAL; | |
568 | ||
569 | return 0; | |
570 | } | |
571 | ||
/*
 * smu_resume - amdgpu IP callback: resume hook for the SMU block
 *
 * Re-loads/validates firmware, then under smu->mutex replays the table
 * uploads (tools table, pptable, watermarks), restores the deep-sleep
 * dcefclk and re-enables the system features.
 */
static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (adev->asic_type < CHIP_VEGA20)
		return -EINVAL;

	pr_info("SMU is resuming...\n");

	/* when PSP does not load the firmware, the driver loads it directly */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		ret = smu_load_microcode(smu);
		if (ret)
			return ret;
	}

	ret = smu_check_fw_status(smu);
	if (ret) {
		pr_err("SMC firmware status is not correct\n");
		return ret;
	}

	mutex_lock(&smu->mutex);

	ret = smu_set_tool_table_location(smu);
	if (ret)
		goto failed;

	ret = smu_write_pptable(smu);
	if (ret)
		goto failed;

	ret = smu_write_watermarks_table(smu);
	if (ret)
		goto failed;

	/* restore the dcefclk deep-sleep minimum saved before suspend */
	ret = smu_set_last_dcef_min_deep_sleep_clk(smu);
	if (ret)
		goto failed;

	ret = smu_system_features_control(smu, true);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is resumed successfully!\n");

	return 0;
failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
626 | ||
/* Stub: no SMU-level clockgating control; requests are accepted as no-ops. */
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}
632 | ||
/* Stub: no SMU-level powergating control; requests are accepted as no-ops. */
static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
638 | ||
/* IP-block callbacks wiring the SMU into the amdgpu init/fini/pm flow. */
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = NULL,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
};
07845526 HR |
656 | |
/* SMU v11.0 IP block descriptor registered with the amdgpu IP framework. */
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};