drm/amd/pm: add unique serial number support for smu_v13_0_6
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#define SMU_13_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
#include "smu_cmn.h"

#include "asic_reg/thm/thm_13_0_2_offset.h"
#include "asic_reg/thm/thm_13_0_2_sh_mask.h"
#include "asic_reg/mp/mp_13_0_2_offset.h"
#include "asic_reg/mp/mp_13_0_2_sh_mask.h"
#include "asic_reg/smuio/smuio_13_0_2_offset.h"
#include "asic_reg/smuio/smuio_13_0_2_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");

#define mmMP1_SMN_C2PMSG_66 0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0

#define mmMP1_SMN_C2PMSG_82 0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0

#define mmMP1_SMN_C2PMSG_90 0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0

#define SMU13_VOLTAGE_SCALE 4

#define LINK_WIDTH_MAX 6
#define LINK_SPEED_MAX 3

#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define smnPCIE_LC_SPEED_CNTL 0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE

static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};

const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};

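/*
 * Request the SMC firmware image. The file name is derived from the MP1
 * IP version (e.g. "amdgpu/smu_13_0_0.bin"); under SR-IOV the host owns
 * the SMU firmware, so loading is skipped entirely.
 */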
int smu_v13_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	char fw_name[30];
	char ucode_prefix[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);

	err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err)
		amdgpu_ucode_release(&adev->pm.fw);
	return err;
}

void smu_v13_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	amdgpu_ucode_release(&adev->pm.fw);
	adev->pm.fw_version = 0;
}

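/*
 * Direct MP1 SRAM load path. The body is compiled out (#if 0); on
 * supported parts the SMC image is typically loaded through the PSP
 * instead, so this helper is effectively a no-op that reports success.
 */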
int smu_v13_0_load_microcode(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = hdr->header.ucode_size_bytes;

	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;
#endif

	return 0;
}

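/*
 * With SCPM enabled the driver may not upload the pptable directly.
 * Instead, the soft pptable extracted from the SMU firmware image is
 * registered as a PSP ucode entry (AMDGPU_UCODE_ID_PPTABLE) for the
 * PSP load path.
 */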
int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_firmware_info *ucode = NULL;
	uint32_t size = 0, pptable_id = 0;
	int ret = 0;
	void *table;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (!adev->scpm_enabled)
		return 0;

	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) ||
	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)))
		return 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* "pptable_id == 0" means vbios carries the pptable. */
	if (!pptable_id)
		return 0;

	ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
	if (ret)
		return ret;

	smu->pptable_firmware.data = table;
	smu->pptable_firmware.size = size;

	ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
	ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
	ucode->fw = &smu->pptable_firmware;
	adev->firmware.fw_size +=
		ALIGN(smu->pptable_firmware.size, PAGE_SIZE);

	return 0;
}

int smu_v13_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
		break;
	default:
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		break;
	}

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

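/*
 * The PMFW version word packs four fields:
 *   bits 31:24 program, 23:16 major, 15:8 minor, 7:0 debug.
 * An if_version mismatch is reported but does not fail driver load.
 */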
int smu_v13_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_program = (smu_version >> 24) & 0xff;
	smu_major = (smu_version >> 16) & 0xff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	if (smu->is_apu ||
	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6))
		adev->pm.fw_version = smu_version;

	/* only for dGPU w/ SMU13 */
	if (adev->pm.fw)
		dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
			smu_program, smu_version, smu_major, smu_minor, smu_debug);

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But those are
	 * visible only with the paired driver.
	 * Considering the above, we just leave the user a warning message
	 * instead of halting driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
			 smu->smc_driver_if_version, if_version,
			 smu_program, smu_version, smu_major, smu_minor, smu_debug);
		dev_info(adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

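/*
 * v2.x SMC firmware headers embed one or more soft pptables in the
 * firmware image: v2.0 carries a single table at ppt_offset_bytes,
 * while v2.1 adds an entry array so a table can be selected by id.
 */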
static int smu_v13_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	int ret, index;

	dev_info(adev->dev, "use vbios provided pptable\n");
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    powerplayinfo);

	ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
					     (uint8_t **)table);
	if (ret)
		return ret;

	if (size)
		*size = atom_table_size;

	return 0;
}

int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
					void **table,
					uint32_t *size,
					uint32_t pptable_id)
{
	const struct smc_firmware_header_v1_0 *hdr;
	struct amdgpu_device *adev = smu->adev;
	uint16_t version_major, version_minor;
	int ret;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	if (!hdr)
		return -EINVAL;

	dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);

	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major != 2) {
		dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
			version_major, version_minor);
		return -EINVAL;
	}

	switch (version_minor) {
	case 0:
		ret = smu_v13_0_set_pptable_v2_0(smu, table, size);
		break;
	case 1:
		ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

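/*
 * Pick the pptable source: the vbios-provided table is used under
 * SR-IOV or when no pptable id is set (and we are not in emulation
 * mode); otherwise the soft pptable embedded in the SMU firmware is
 * used.
 */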
int smu_v13_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t size = 0, pptable_id = 0;
	void *table;
	int ret = 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* force using vbios pptable in sriov mode */
	if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
		ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size);
	else
		ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);

	if (ret)
		return ret;

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

int smu_v13_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_13_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Aldebaran does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}
	}

	smu_table->combo_pptable =
		kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->combo_pptable) {
		ret = -ENOMEM;
		goto err4_out;
	}

	return 0;

err4_out:
	kfree(smu_table->boot_overdrive_table);
err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

int smu_v13_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->combo_pptable);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	smu_table->gpu_metrics_table = NULL;
	smu_table->combo_pptable = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->ecc_table);
	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->ecc_table = NULL;
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v13_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);

	return 0;
}

int smu_v13_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

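/*
 * Read boot-up clocks and voltages from the vbios. The values come from
 * the atom "firmwareinfo" table (v3.1/v3.3/v3.4 layouts are handled) and
 * are optionally refined from the "smu_info" table when one is present.
 */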
int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_4 *v_3_4;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;
	struct atom_smu_info_v3_6 *smu_info_v3_6;
	struct atom_smu_info_v4_0 *smu_info_v4_0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					     (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu13!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		break;
	case 4:
	default:
		v_3_4 = (struct atom_firmware_info_v3_4 *)header;
		smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
		break;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					    (uint8_t **)&header)) {
		if ((frev == 3) && (crev == 6)) {
			smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
		} else if ((frev == 3) && (crev == 1)) {
			return 0;
		} else if ((frev == 4) && (crev == 0)) {
			smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
			smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
		} else {
			dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
				 (uint32_t)frev, (uint32_t)crev);
		}
	}

	return 0;
}

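/*
 * The SMU only takes 32-bit message arguments, so the 64-bit MC address
 * of the memory pool is split into high/low halves and sent as two
 * separate messages, followed by the pool size.
 */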
int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					      address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					      address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
	if (ret)
		dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!");

	return ret;
}

int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetDriverDramAddrLow,
							      lower_32_bits(driver_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v13_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetToolsDramAddrLow,
							      lower_32_bits(tool_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);

	return ret;
}

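/*
 * The allowed-feature bitmap is 64 bits wide but SMU messages carry
 * 32-bit payloads, so the mask is sent as two words: the high half
 * first, then the low half.
 */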
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
	    feature->feature_num < 64)
		return -EINVAL;

	bitmap_to_arr32(feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					      feature_mask[1], NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetAllowedFeaturesMaskLow,
					       feature_mask[0],
					       NULL);
}

int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
		else
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
		break;
	default:
		break;
	}

	return ret;
}

int smu_v13_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					  SMU_MSG_DisableAllSmuFeatures), NULL);
}

int smu_v13_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);

	return ret;
}

static int
smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}

int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_13_0_max_sustainable_clocks *max_sustainable_clocks =
		smu->smu_table.max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

int smu_v13_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetPptLimit,
					      power_src << 16,
					      power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}

int smu_v13_0_set_power_limit(struct smu_context *smu,
			      enum smu_ppt_limit_type limit_type,
			      uint32_t limit)
{
	int ret = 0;

	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
		return ret;
	}

	smu->current_power_limit = limit;

	return 0;
}

static int smu_v13_0_allow_ih_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_AllowIHHostInterrupt,
				    NULL);
}

static int smu_v13_0_process_pending_interrupt(struct smu_context *smu)
{
	int ret = 0;

	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v13_0_allow_ih_interrupt(smu);

	return ret;
}

int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->irq_source.num_types)
		return 0;

	ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
	if (ret)
		return ret;

	return smu_v13_0_process_pending_interrupt(smu);
}

int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
{
	if (!smu->irq_source.num_types)
		return 0;

	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU13_VOLTAGE_SCALE);
}

int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, regSMUSVI0_TEL_PLANE0) &
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}

int
smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v13_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
			__func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v13_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

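/*
 * Manual fan speed is given as a 0-255 PWM value and rescaled to the
 * hardware duty range read back from CG_FDO_CTRL1.FMAX_DUTY100 before
 * being programmed as a static duty cycle.
 */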
int smu_v13_0_set_fan_speed_pwm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	speed = MIN(speed, 255);

	if (smu_v13_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 255);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

int
smu_v13_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v13_0_set_fan_speed_pwm(smu, 255);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v13_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v13_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

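/*
 * For RPM control the target tachometer period is derived from the
 * fixed crystal clock constant (2500) as
 * tach_period = 60 * 2500 * 10000 / (8 * rpm), then programmed into
 * CG_TACH_CTRL.TARGET_PERIOD.
 */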
int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_period;
	int ret;

	if (!speed)
		return -EINVAL;

	ret = smu_v13_0_auto_fan_control(smu, 0);
	if (ret)
		return ret;

	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
}

int smu_v13_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetXgmiMode,
					      pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
					      NULL);
	return ret;
}

static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   unsigned type,
				   enum amdgpu_interrupt_state state)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t low, high;
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For THM irqs */
		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For THM irqs */
		low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			  smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
		high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
			   smu->thermal_range.software_shutdown_temp);

		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
		val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}

static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_ReenableAcDcInterrupt,
				    NULL);
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
#define SMUIO_11_0__SRCID__SMUIO_GPIO19		83

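/*
 * SMCToHost (client MP1, src 0xfe) events are demultiplexed by ctxid:
 * 0x3/0x4 signal AC/DC power source switches, 0x7 a throttling event,
 * and 0x8/0x9 lower/restore the soft CTF threshold using the offset
 * from smu->thermal_range. THM and SMUIO clients report thermal (CTF)
 * faults.
 */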
static int smu_v13_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;
	/*
	 * ctxid is used to distinguish different
	 * events for SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;
	uint32_t high;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
			orderly_poweroff(true);
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			break;
		default:
			dev_emerg(adev->dev, "ERROR: GPU temperature range interrupt with unknown src id (%d)\n",
				  src_id);
			break;
		}
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == 0xfe) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);

			switch (ctxid) {
			case 0x3:
				dev_dbg(adev->dev, "Switched to AC mode!\n");
				smu_v13_0_ack_ac_dc_interrupt(smu);
				break;
			case 0x4:
				dev_dbg(adev->dev, "Switched to DC mode!\n");
				smu_v13_0_ack_ac_dc_interrupt(smu);
				break;
			case 0x7:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				if (__ratelimit(&adev->throttling_logging_rs))
					schedule_work(&smu->throttling_logging_work);

				break;
			case 0x8:
				high = smu->thermal_range.software_shutdown_temp +
					smu->thermal_range.software_shutdown_temp_offset;
				high = min_t(typeof(high),
					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
					     high);
				dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
					  high,
					  smu->thermal_range.software_shutdown_temp_offset);

				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
						     DIG_THERM_INTH,
						     (high & 0xff));
				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
				break;
			case 0x9:
				high = min_t(typeof(high),
					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
					     smu->thermal_range.software_shutdown_temp);
				dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);

				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
						     DIG_THERM_INTH,
						     (high & 0xff));
				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
				break;
			}
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs = {
	.set = smu_v13_0_set_irq_state,
	.process = smu_v13_0_irq_process,
};

int smu_v13_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v13_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				0xfe,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);

	return ret;
}

static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
					     uint64_t event_arg)
{
	int ret = 0;

	dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);

	return ret;
}

int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
			     uint64_t event_arg)
{
	int ret = -EINVAL;

	switch (event) {
	case SMU_EVENT_RESET_COMPLETE:
		ret = smu_v13_0_wait_for_reset_complete(smu, event_arg);
		break;
	default:
		break;
	}

	return ret;
}

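/*
 * DPM frequency queries encode the clock id in the upper 16 bits of the
 * message argument; results are reported in MHz. If DPM is disabled for
 * the clock, the vbios boot value (in 10 kHz units, hence the /100) is
 * returned for both min and max.
 */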
int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		if (smu->adev->pm.ac_power)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetMaxDpmFreq,
							      param,
							      max);
		else
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetDcModeMaxDpmFreq,
							      param,
							      max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

out:
	return ret;
}

int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

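/*
 * Map a forced performance level onto per-clock soft min/max ranges:
 * HIGH/LOW pin every clock to the table max/min, AUTO spans the full
 * range, and the PROFILE_* levels use the precomputed pstate values.
 * The chosen ranges are then applied clock by clock and mirrored into
 * pstate_table->*.curr.
 */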
1690int smu_v13_0_set_performance_level(struct smu_context *smu,
1691 enum amd_dpm_forced_level level)
1692{
1693 struct smu_13_0_dpm_context *dpm_context =
1694 smu->smu_dpm.dpm_context;
1695 struct smu_13_0_dpm_table *gfx_table =
1696 &dpm_context->dpm_tables.gfx_table;
1697 struct smu_13_0_dpm_table *mem_table =
1698 &dpm_context->dpm_tables.uclk_table;
1699 struct smu_13_0_dpm_table *soc_table =
1700 &dpm_context->dpm_tables.soc_table;
276c03a0
EQ
1701 struct smu_13_0_dpm_table *vclk_table =
1702 &dpm_context->dpm_tables.vclk_table;
1703 struct smu_13_0_dpm_table *dclk_table =
1704 &dpm_context->dpm_tables.dclk_table;
1705 struct smu_13_0_dpm_table *fclk_table =
1706 &dpm_context->dpm_tables.fclk_table;
c05d1c40
KW
1707 struct smu_umd_pstate_table *pstate_table =
1708 &smu->pstate_table;
1709 struct amdgpu_device *adev = smu->adev;
1710 uint32_t sclk_min = 0, sclk_max = 0;
1711 uint32_t mclk_min = 0, mclk_max = 0;
1712 uint32_t socclk_min = 0, socclk_max = 0;
276c03a0
EQ
1713 uint32_t vclk_min = 0, vclk_max = 0;
1714 uint32_t dclk_min = 0, dclk_max = 0;
1715 uint32_t fclk_min = 0, fclk_max = 0;
1716 int ret = 0, i;
c05d1c40
KW
1717
1718 switch (level) {
1719 case AMD_DPM_FORCED_LEVEL_HIGH:
1720 sclk_min = sclk_max = gfx_table->max;
1721 mclk_min = mclk_max = mem_table->max;
1722 socclk_min = socclk_max = soc_table->max;
276c03a0
EQ
1723 vclk_min = vclk_max = vclk_table->max;
1724 dclk_min = dclk_max = dclk_table->max;
1725 fclk_min = fclk_max = fclk_table->max;
c05d1c40
KW
1726 break;
1727 case AMD_DPM_FORCED_LEVEL_LOW:
1728 sclk_min = sclk_max = gfx_table->min;
1729 mclk_min = mclk_max = mem_table->min;
1730 socclk_min = socclk_max = soc_table->min;
276c03a0
EQ
1731 vclk_min = vclk_max = vclk_table->min;
1732 dclk_min = dclk_max = dclk_table->min;
1733 fclk_min = fclk_max = fclk_table->min;
c05d1c40
KW
1734 break;
1735 case AMD_DPM_FORCED_LEVEL_AUTO:
1736 sclk_min = gfx_table->min;
1737 sclk_max = gfx_table->max;
1738 mclk_min = mem_table->min;
1739 mclk_max = mem_table->max;
1740 socclk_min = soc_table->min;
1741 socclk_max = soc_table->max;
276c03a0
EQ
1742 vclk_min = vclk_table->min;
1743 vclk_max = vclk_table->max;
1744 dclk_min = dclk_table->min;
1745 dclk_max = dclk_table->max;
1746 fclk_min = fclk_table->min;
1747 fclk_max = fclk_table->max;
c05d1c40
KW
1748 break;
1749 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1750 sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
1751 mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
1752 socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
276c03a0
EQ
1753 vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
1754 dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
1755 fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
c05d1c40
KW
1756 break;
1757 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1758 sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
1759 break;
1760 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1761 mclk_min = mclk_max = pstate_table->uclk_pstate.min;
1762 break;
1763 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1764 sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
1765 mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
1766 socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
276c03a0
EQ
1767 vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
1768 dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
1769 fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
c05d1c40
KW
1770 break;
1771 case AMD_DPM_FORCED_LEVEL_MANUAL:
1772 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1773 return 0;
1774 default:
1775 dev_err(adev->dev, "Invalid performance level %d\n", level);
1776 return -EINVAL;
1777 }
1778
276c03a0
EQ
1779 /*
1780 * Unset these for SMU 13.0.2, as soft limit settings for these
1781 * clock domains are not supported.
1782 */
1783 if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) {
1784 mclk_min = mclk_max = 0;
1785 socclk_min = socclk_max = 0;
1786 vclk_min = vclk_max = 0;
1787 dclk_min = dclk_max = 0;
1788 fclk_min = fclk_max = 0;
1789 }
1790
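/* A zero min/max pair leaves that clock domain's soft limits untouched. */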
1791 if (sclk_min && sclk_max) {
1792 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1793 SMU_GFXCLK,
1794 sclk_min,
1795 sclk_max);
1796 if (ret)
1797 return ret;
1798
1799 pstate_table->gfxclk_pstate.curr.min = sclk_min;
1800 pstate_table->gfxclk_pstate.curr.max = sclk_max;
1801 }
1802
1803 if (mclk_min && mclk_max) {
1804 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1805 SMU_MCLK,
1806 mclk_min,
1807 mclk_max);
1808 if (ret)
1809 return ret;
1810
1811 pstate_table->uclk_pstate.curr.min = mclk_min;
1812 pstate_table->uclk_pstate.curr.max = mclk_max;
1813 }
1814
1815 if (socclk_min && socclk_max) {
1816 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1817 SMU_SOCCLK,
1818 socclk_min,
1819 socclk_max);
1820 if (ret)
1821 return ret;
1822
1823 pstate_table->socclk_pstate.curr.min = socclk_min;
1824 pstate_table->socclk_pstate.curr.max = socclk_max;
1825 }
1826
1827 if (vclk_min && vclk_max) {
1828 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1829 if (adev->vcn.harvest_config & (1 << i))
1830 continue;
1831 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1832 i ? SMU_VCLK1 : SMU_VCLK,
1833 vclk_min,
1834 vclk_max);
1835 if (ret)
1836 return ret;
1837 }
1838 pstate_table->vclk_pstate.curr.min = vclk_min;
1839 pstate_table->vclk_pstate.curr.max = vclk_max;
1840 }
1841
1842 if (dclk_min && dclk_max) {
1843 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1844 if (adev->vcn.harvest_config & (1 << i))
1845 continue;
1846 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1847 i ? SMU_DCLK1 : SMU_DCLK,
1848 dclk_min,
1849 dclk_max);
1850 if (ret)
1851 return ret;
1852 }
1853 pstate_table->dclk_pstate.curr.min = dclk_min;
1854 pstate_table->dclk_pstate.curr.max = dclk_max;
1855 }
1856
1857 if (fclk_min && fclk_max) {
1858 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1859 SMU_FCLK,
1860 fclk_min,
1861 fclk_max);
1862 if (ret)
1863 return ret;
1864
1865 pstate_table->fclk_pstate.curr.min = fclk_min;
1866 pstate_table->fclk_pstate.curr.max = fclk_max;
1867 }
1868
1869 return ret;
1870}
1871
1872int smu_v13_0_set_power_source(struct smu_context *smu,
1873 enum smu_power_src_type power_src)
1874{
1875 int pwr_source;
1876
1877 pwr_source = smu_cmn_to_asic_specific_index(smu,
1878 CMN2ASIC_MAPPING_PWR,
1879 (uint32_t)power_src);
1880 if (pwr_source < 0)
1881 return -EINVAL;
1882
1883 return smu_cmn_send_smc_msg_with_param(smu,
1884 SMU_MSG_NotifyPowerSource,
1885 pwr_source,
1886 NULL);
1887}
1888
1889int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
1890 enum smu_clk_type clk_type, uint16_t level,
1891 uint32_t *value)
1892{
1893 int ret = 0, clk_id = 0;
1894 uint32_t param;
1895
1896 if (!value)
1897 return -EINVAL;
1898
1899 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1900 return 0;
1901
1902 clk_id = smu_cmn_to_asic_specific_index(smu,
1903 CMN2ASIC_MAPPING_CLK,
1904 clk_type);
1905 if (clk_id < 0)
1906 return clk_id;
1907
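/* Message argument: clock ID in the upper 16 bits, DPM level index in the lower 16 bits. */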
1908 param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
1909
1910 ret = smu_cmn_send_smc_msg_with_param(smu,
1911 SMU_MSG_GetDpmFreqByIndex,
1912 param,
1913 value);
1914 if (ret)
1915 return ret;
1916
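/* BIT31 of the reply is the fine-grained-DPM flag; mask it off so only the frequency remains. */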
1917 *value = *value & 0x7fffffff;
1918
1919 return ret;
1920}
1921
1922static int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
1923 enum smu_clk_type clk_type,
1924 uint32_t *value)
1925{
1926 int ret;
1927
1928 ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
1929 /* SMU v13.0.2 FW returns a 0-based max level; increment it by one */
1930 if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) && (!ret && value))
1931 ++(*value);
1932
1933 return ret;
1934}
1935
1936static int smu_v13_0_get_fine_grained_status(struct smu_context *smu,
1937 enum smu_clk_type clk_type,
1938 bool *is_fine_grained_dpm)
1939{
1940 int ret = 0, clk_id = 0;
1941 uint32_t param;
1942 uint32_t value;
1943
1944 if (!is_fine_grained_dpm)
1945 return -EINVAL;
1946
1947 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1948 return 0;
1949
1950 clk_id = smu_cmn_to_asic_specific_index(smu,
1951 CMN2ASIC_MAPPING_CLK,
1952 clk_type);
1953 if (clk_id < 0)
1954 return clk_id;
1955
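/* Level index 0xff requests the highest DPM level; BIT31 of the reply carries the fine-grained flag. */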
1956 param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);
1957
1958 ret = smu_cmn_send_smc_msg_with_param(smu,
1959 SMU_MSG_GetDpmFreqByIndex,
1960 param,
1961 &value);
1962 if (ret)
1963 return ret;
1964
1965 /*
1966 * BIT31: 1 - Fine grained DPM, 0 - Discrete DPM
1967 * Fine grained DPM is not supported for now.
1968 */
1969 *is_fine_grained_dpm = value & 0x80000000;
1970
1971 return 0;
1972}
1973
1974int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
1975 enum smu_clk_type clk_type,
1976 struct smu_13_0_dpm_table *single_dpm_table)
1977{
1978 int ret = 0;
1979 uint32_t clk;
1980 int i;
1981
1982 ret = smu_v13_0_get_dpm_level_count(smu,
1983 clk_type,
1984 &single_dpm_table->count);
1985 if (ret) {
1986 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
1987 return ret;
1988 }
1989
1990 if (smu->adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) {
1991 ret = smu_v13_0_get_fine_grained_status(smu,
1992 clk_type,
1993 &single_dpm_table->is_fine_grained);
1994 if (ret) {
1995 dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
1996 return ret;
1997 }
1998 }
1999
2000 for (i = 0; i < single_dpm_table->count; i++) {
2001 ret = smu_v13_0_get_dpm_freq_by_index(smu,
2002 clk_type,
2003 i,
2004 &clk);
2005 if (ret) {
2006 dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
2007 return ret;
2008 }
2009
2010 single_dpm_table->dpm_levels[i].value = clk;
2011 single_dpm_table->dpm_levels[i].enabled = true;
2012
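/* First and last entries bound the range, assuming the PMFW reports levels in ascending order. */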
2013 if (i == 0)
2014 single_dpm_table->min = clk;
2015 else if (i == single_dpm_table->count - 1)
2016 single_dpm_table->max = clk;
2017 }
2018
2019 return 0;
2020}
2021
2022int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu)
2023{
2024 struct amdgpu_device *adev = smu->adev;
2025
2026 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
2027 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
2028 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
2029}
2030
2031int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu)
2032{
2033 uint32_t width_level;
2034
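/* link_width[] maps the encoded level to a PCIe lane count; out-of-range levels fall back to index 0. */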
2035 width_level = smu_v13_0_get_current_pcie_link_width_level(smu);
2036 if (width_level > LINK_WIDTH_MAX)
2037 width_level = 0;
2038
2039 return link_width[width_level];
2040}
2041
2042int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu)
2043{
2044 struct amdgpu_device *adev = smu->adev;
2045
2046 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
2047 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
2048 >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
2049}
2050
2051int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu)
2052{
2053 uint32_t speed_level;
2054
2055 speed_level = smu_v13_0_get_current_pcie_link_speed_level(smu);
2056 if (speed_level > LINK_SPEED_MAX)
2057 speed_level = 0;
2058
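/* link_speed[] holds PCIe data rates in 0.1 GT/s units: 2.5, 5.0, 8.0 and 16.0 GT/s. */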
2059 return link_speed[speed_level];
2060}
2061
2062int smu_v13_0_set_vcn_enable(struct smu_context *smu,
2063 bool enable)
2064{
2065 struct amdgpu_device *adev = smu->adev;
2066 int i, ret = 0;
2067
2068 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2069 if (adev->vcn.harvest_config & (1 << i))
2070 continue;
2071
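/* The VCN instance index is passed in the upper 16 bits of the message argument. */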
2072 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
2073 SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
2074 i << 16U, NULL);
2075 if (ret)
2076 return ret;
2077 }
2078
2079 return ret;
2080}
2081
2082int smu_v13_0_set_jpeg_enable(struct smu_context *smu,
2083 bool enable)
2084{
2085 return smu_cmn_send_smc_msg_with_param(smu, enable ?
2086 SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
2087 0, NULL);
2088}
2089
2090int smu_v13_0_run_btc(struct smu_context *smu)
2091{
2092 int res;
2093
2094 res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
2095 if (res)
2096 dev_err(smu->adev->dev, "RunDcBtc failed!\n");
2097
2098 return res;
2099}
2100
2101int smu_v13_0_gpo_control(struct smu_context *smu,
2102 bool enablement)
2103{
2104 int res;
2105
2106 res = smu_cmn_send_smc_msg_with_param(smu,
2107 SMU_MSG_AllowGpo,
2108 enablement ? 1 : 0,
2109 NULL);
2110 if (res)
2111 dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
2112
2113 return res;
2114}
2115
2116int smu_v13_0_deep_sleep_control(struct smu_context *smu,
2117 bool enablement)
2118{
2119 struct amdgpu_device *adev = smu->adev;
2120 int ret = 0;
2121
2122 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
2123 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
2124 if (ret) {
2125 dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
2126 return ret;
2127 }
2128 }
2129
2130 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
2131 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
2132 if (ret) {
2133 dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
2134 return ret;
2135 }
2136 }
2137
2138 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
2139 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
2140 if (ret) {
2141 dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
2142 return ret;
2143 }
2144 }
2145
2146 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
2147 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
2148 if (ret) {
2149 dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
2150 return ret;
2151 }
2152 }
2153
2154 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
2155 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
2156 if (ret) {
2157 dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
2158 return ret;
2159 }
2160 }
2161
2162 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
2163 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
2164 if (ret) {
2165 dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
2166 return ret;
2167 }
2168 }
2169
2170 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
2171 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
2172 if (ret) {
2173 dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
2174 return ret;
2175 }
2176 }
2177
2178 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
2179 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
2180 if (ret) {
2181 dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
2182 return ret;
2183 }
2184 }
2185
2186 return ret;
2187}
2188
2189int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
2190 bool enablement)
2191{
2192 int ret = 0;
2193
2194 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
2195 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
2196
2197 return ret;
2198}
2199
2200int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
2201 enum smu_baco_seq baco_seq)
2202{
2203 struct smu_baco_context *smu_baco = &smu->smu_baco;
2204 int ret;
2205
2206 ret = smu_cmn_send_smc_msg_with_param(smu,
2207 SMU_MSG_ArmD3,
2208 baco_seq,
2209 NULL);
2210 if (ret)
2211 return ret;
2212
2213 if (baco_seq == BACO_SEQ_BAMACO ||
2214 baco_seq == BACO_SEQ_BACO)
2215 smu_baco->state = SMU_BACO_STATE_ENTER;
2216 else
2217 smu_baco->state = SMU_BACO_STATE_EXIT;
2218
2219 return 0;
2220}
2221
2222bool smu_v13_0_baco_is_support(struct smu_context *smu)
2223{
2224 struct smu_baco_context *smu_baco = &smu->smu_baco;
2225
2226 if (amdgpu_sriov_vf(smu->adev) ||
2227 !smu_baco->platform_support)
2228 return false;
2229
2230 /* return true if ASIC is in BACO state already */
2231 if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
2232 return true;
2233
2234 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
2235 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
2236 return false;
2237
2238 return true;
2239}
2240
2241enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu)
2242{
2243 struct smu_baco_context *smu_baco = &smu->smu_baco;
2244
2245 return smu_baco->state;
2246}
2247
2248int smu_v13_0_baco_set_state(struct smu_context *smu,
2249 enum smu_baco_state state)
2250{
2251 struct smu_baco_context *smu_baco = &smu->smu_baco;
2252 struct amdgpu_device *adev = smu->adev;
2253 int ret = 0;
2254
2255 if (smu_v13_0_baco_get_state(smu) == state)
2256 return 0;
2257
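/* On entry, request the BAMACO sequence when MACO is supported, plain BACO otherwise. */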
2258 if (state == SMU_BACO_STATE_ENTER) {
2259 ret = smu_cmn_send_smc_msg_with_param(smu,
2260 SMU_MSG_EnterBaco,
2261 smu_baco->maco_support ?
2262 BACO_SEQ_BAMACO : BACO_SEQ_BACO,
2263 NULL);
2264 } else {
2265 ret = smu_cmn_send_smc_msg(smu,
2266 SMU_MSG_ExitBaco,
2267 NULL);
2268 if (ret)
2269 return ret;
2270
2271 /* clear vbios scratch registers 6 and 7 for the coming ASIC reinit */
2272 WREG32(adev->bios_scratch_reg_offset + 6, 0);
2273 WREG32(adev->bios_scratch_reg_offset + 7, 0);
2274 }
2275
2276 if (!ret)
2277 smu_baco->state = state;
2278
2279 return ret;
2280}
2281
2282int smu_v13_0_baco_enter(struct smu_context *smu)
2283{
2284 int ret = 0;
2285
2286 ret = smu_v13_0_baco_set_state(smu,
2287 SMU_BACO_STATE_ENTER);
2288 if (ret)
2289 return ret;
2290
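/* Brief delay, presumably to give the PMFW time to finish the BACO entry sequence. */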
2291 msleep(10);
2292
2293 return ret;
2294}
2295
2296int smu_v13_0_baco_exit(struct smu_context *smu)
2297{
2298 return smu_v13_0_baco_set_state(smu,
2299 SMU_BACO_STATE_EXIT);
2300}
2301
2302int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
2303{
2304 uint16_t index;
2305
2306 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
2307 SMU_MSG_EnableGfxImu);
2308 /* Param 1 tells the PMFW to enable the GFXOFF feature */
2309 return smu_cmn_send_msg_without_waiting(smu, index, 1);
2310}
2311
2312int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
2313 enum PP_OD_DPM_TABLE_COMMAND type,
2314 long input[], uint32_t size)
2315{
2316 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
2317 int ret = 0;
2318
2319 /* Only allowed in manual mode */
2320 if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
2321 return -EINVAL;
2322
2323 switch (type) {
2324 case PP_OD_EDIT_SCLK_VDDC_TABLE:
2325 if (size != 2) {
2326 dev_err(smu->adev->dev, "Incorrect number of input parameters\n");
2327 return -EINVAL;
2328 }
2329
2330 if (input[0] == 0) {
2331 if (input[1] < smu->gfx_default_hard_min_freq) {
2332 dev_warn(smu->adev->dev,
2333 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
2334 input[1], smu->gfx_default_hard_min_freq);
2335 return -EINVAL;
2336 }
2337 smu->gfx_actual_hard_min_freq = input[1];
2338 } else if (input[0] == 1) {
2339 if (input[1] > smu->gfx_default_soft_max_freq) {
2340 dev_warn(smu->adev->dev,
2341 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
2342 input[1], smu->gfx_default_soft_max_freq);
2343 return -EINVAL;
2344 }
2345 smu->gfx_actual_soft_max_freq = input[1];
2346 } else {
2347 return -EINVAL;
2348 }
2349 break;
2350 case PP_OD_RESTORE_DEFAULT_TABLE:
2351 if (size != 0) {
2352 dev_err(smu->adev->dev, "Incorrect number of input parameters\n");
2353 return -EINVAL;
2354 }
2355 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
2356 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
2357 break;
2358 case PP_OD_COMMIT_DPM_TABLE:
2359 if (size != 0) {
2360 dev_err(smu->adev->dev, "Incorrect number of input parameters\n");
2361 return -EINVAL;
2362 }
2363 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
2364 dev_err(smu->adev->dev,
2365 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
2366 smu->gfx_actual_hard_min_freq,
2367 smu->gfx_actual_soft_max_freq);
2368 return -EINVAL;
2369 }
2370
2371 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
2372 smu->gfx_actual_hard_min_freq,
2373 NULL);
2374 if (ret) {
2375 dev_err(smu->adev->dev, "Set hard min sclk failed!\n");
2376 return ret;
2377 }
2378
2379 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
2380 smu->gfx_actual_soft_max_freq,
2381 NULL);
2382 if (ret) {
2383 dev_err(smu->adev->dev, "Set soft max sclk failed!\n");
2384 return ret;
2385 }
2386 break;
2387 default:
2388 return -ENOSYS;
2389 }
2390
2391 return ret;
2392}
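/*
 * Illustrative sketch, not part of the driver: these table edits are
 * normally driven from user space through amdgpu's pp_od_clk_voltage
 * sysfs file. The command strings below assume the documented
 * overdrive interface.
 *
 *   echo "s 0 500"  > pp_od_clk_voltage   (PP_OD_EDIT_SCLK_VDDC_TABLE, min sclk 500 MHz)
 *   echo "s 1 2000" > pp_od_clk_voltage   (PP_OD_EDIT_SCLK_VDDC_TABLE, max sclk 2000 MHz)
 *   echo "c"        > pp_od_clk_voltage   (PP_OD_COMMIT_DPM_TABLE)
 *   echo "r"        > pp_od_clk_voltage   (PP_OD_RESTORE_DEFAULT_TABLE)
 */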
2393
2394int smu_v13_0_set_default_dpm_tables(struct smu_context *smu)
2395{
2396 struct smu_table_context *smu_table = &smu->smu_table;
2397
2398 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
2399 smu_table->clocks_table, false);
2400}
2401
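/* MP1-to-host mailboxes: C2PMSG_82 carries the message argument, C2PMSG_66 the message ID, C2PMSG_90 the response. */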
2402void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu)
2403{
2404 struct amdgpu_device *adev = smu->adev;
2405
2406 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
2407 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
2408 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
2409}
2410
2411int smu_v13_0_mode1_reset(struct smu_context *smu)
2412{
2413 int ret = 0;
2414
2415 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
2416 if (!ret)
2417 msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
2418
2419 return ret;
2420}