Commit | Line | Data |
---|---|---|
07845526 HR |
1 | /* |
2 | * Copyright 2019 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | */ | |
22 | ||
07845526 | 23 | #include <linux/firmware.h> |
841d0023 | 24 | #include <linux/module.h> |
d7929c1e | 25 | #include <linux/pci.h> |
94952205 | 26 | #include <linux/reboot.h> |
841d0023 | 27 | |
73abde4d MC |
28 | #define SMU_11_0_PARTIAL_PPTABLE |
29 | ||
07845526 HR |
30 | #include "amdgpu.h" |
31 | #include "amdgpu_smu.h" | |
18c1d3ce | 32 | #include "smu_internal.h" |
eaf02a4d | 33 | #include "atomfirmware.h" |
244f3449 | 34 | #include "amdgpu_atomfirmware.h" |
e11c4fd5 | 35 | #include "smu_v11_0.h" |
b0b4b413 | 36 | #include "soc15_common.h" |
08115f87 | 37 | #include "atom.h" |
372120f0 | 38 | #include "amd_pcie.h" |
32cc3bf0 | 39 | #include "amdgpu_ras.h" |
b0b4b413 KW |
40 | |
41 | #include "asic_reg/thm/thm_11_0_2_offset.h" | |
42 | #include "asic_reg/thm/thm_11_0_2_sh_mask.h" | |
980e04ec HR |
43 | #include "asic_reg/mp/mp_11_0_offset.h" |
44 | #include "asic_reg/mp/mp_11_0_sh_mask.h" | |
980e04ec HR |
45 | #include "asic_reg/smuio/smuio_11_0_0_offset.h" |
46 | #include "asic_reg/smuio/smuio_11_0_0_sh_mask.h" | |
07845526 | 47 | |
55084d7f EQ |
48 | /* |
49 | * DO NOT use these for err/warn/info/debug messages. | |
50 | * Use dev_err, dev_warn, dev_info and dev_dbg instead. | |
51 | * They are more MGPU friendly. | |
52 | */ | |
53 | #undef pr_err | |
54 | #undef pr_warn | |
55 | #undef pr_info | |
56 | #undef pr_debug | |
57 | ||
e7773c1c | 58 | MODULE_FIRMWARE("amdgpu/arcturus_smc.bin"); |
879af1c6 | 59 | MODULE_FIRMWARE("amdgpu/navi10_smc.bin"); |
b02ff126 | 60 | MODULE_FIRMWARE("amdgpu/navi14_smc.bin"); |
9ea8da75 | 61 | MODULE_FIRMWARE("amdgpu/navi12_smc.bin"); |
b455159c | 62 | MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin"); |
59abab5a | 63 | |
77d1eef4 | 64 | #define SMU11_VOLTAGE_SCALE 4 |
2f613c70 | 65 | |
b0b4b413 KW |
66 | static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu, |
67 | uint16_t msg) | |
68 | { | |
69 | struct amdgpu_device *adev = smu->adev; | |
38748ad8 | 70 | WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); |
b0b4b413 KW |
71 | return 0; |
72 | } | |
73 | ||
ae458c7b | 74 | static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) |
765c50cb KW |
75 | { |
76 | struct amdgpu_device *adev = smu->adev; | |
77 | ||
38748ad8 | 78 | *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82); |
765c50cb KW |
79 | return 0; |
80 | } | |
81 | ||
b0b4b413 KW |
82 | static int smu_v11_0_wait_for_response(struct smu_context *smu) |
83 | { | |
84 | struct amdgpu_device *adev = smu->adev; | |
e3000669 | 85 | uint32_t cur_value, i, timeout = adev->usec_timeout * 10; |
b0b4b413 | 86 | |
e3000669 | 87 | for (i = 0; i < timeout; i++) { |
38748ad8 | 88 | cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90); |
b0b4b413 | 89 | if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0) |
fcb1fe9c EQ |
90 | return cur_value == 0x1 ? 0 : -EIO; |
91 | ||
b0b4b413 KW |
92 | udelay(1); |
93 | } | |
94 | ||
95 | /* timeout means wrong logic */ | |
38748ad8 ML |
96 | if (i == timeout) |
97 | return -ETIME; | |
98 | ||
99 | return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO; | |
b0b4b413 KW |
100 | } |
101 | ||
/*
 * Send one message (with a 32-bit parameter) to the SMU and wait for
 * the reply. If @read_arg is non-NULL, the SMU's 32-bit response
 * argument is read back into it.
 *
 * The whole exchange is serialized under smu->message_lock.
 *
 * Returns 0 on success; -EIO/-ETIME from the response polling; a
 * negative index lookup error. A lookup result of -EACCES is
 * deliberately translated to 0 (message not permitted -> silently
 * skipped).
 */
int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
			      enum smu_message_type msg,
			      uint32_t param,
			      uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	/* Translate the generic message enum into the ASIC's message ID. */
	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);
	/* Make sure the previous exchange fully completed. */
	ret = smu_v11_0_wait_for_response(smu);
	if (ret) {
		dev_err(adev->dev, "Msg issuing pre-check failed and "
		       "SMU may be not in the right state!\n");
		goto out;
	}

	/* Clear the response register before issuing the new message. */
	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	/* Parameter goes into C2PMSG_82, then the message ID is posted. */
	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret) {
		dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
		       smu_get_message_name(smu, msg), index, param, ret);
		goto out;
	}

	if (read_arg) {
		ret = smu_v11_0_read_arg(smu, read_arg);
		if (ret) {
			dev_err(adev->dev, "failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
			       smu_get_message_name(smu, msg), index, param, ret);
			goto out;
		}
	}
out:
	mutex_unlock(&smu->message_lock);
	return ret;
}
148 | ||
6c45e480 | 149 | int smu_v11_0_init_microcode(struct smu_context *smu) |
07845526 HR |
150 | { |
151 | struct amdgpu_device *adev = smu->adev; | |
59abab5a LG |
152 | const char *chip_name; |
153 | char fw_name[30]; | |
154 | int err = 0; | |
155 | const struct smc_firmware_header_v1_0 *hdr; | |
156 | const struct common_firmware_header *header; | |
157 | struct amdgpu_firmware_info *ucode = NULL; | |
07845526 | 158 | |
59abab5a | 159 | switch (adev->asic_type) { |
e7773c1c CG |
160 | case CHIP_ARCTURUS: |
161 | chip_name = "arcturus"; | |
162 | break; | |
31528650 HR |
163 | case CHIP_NAVI10: |
164 | chip_name = "navi10"; | |
165 | break; | |
b02ff126 XY |
166 | case CHIP_NAVI14: |
167 | chip_name = "navi14"; | |
168 | break; | |
9ea8da75 XY |
169 | case CHIP_NAVI12: |
170 | chip_name = "navi12"; | |
171 | break; | |
b455159c LG |
172 | case CHIP_SIENNA_CICHLID: |
173 | chip_name = "sienna_cichlid"; | |
174 | break; | |
59abab5a LG |
175 | default: |
176 | BUG(); | |
177 | } | |
178 | ||
179 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); | |
180 | ||
181 | err = request_firmware(&adev->pm.fw, fw_name, adev->dev); | |
182 | if (err) | |
183 | goto out; | |
184 | err = amdgpu_ucode_validate(adev->pm.fw); | |
185 | if (err) | |
186 | goto out; | |
187 | ||
188 | hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; | |
189 | amdgpu_ucode_print_smc_hdr(&hdr->header); | |
190 | adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); | |
191 | ||
192 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { | |
193 | ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; | |
194 | ucode->ucode_id = AMDGPU_UCODE_ID_SMC; | |
195 | ucode->fw = adev->pm.fw; | |
196 | header = (const struct common_firmware_header *)ucode->fw->data; | |
197 | adev->firmware.fw_size += | |
198 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | |
199 | } | |
200 | ||
201 | out: | |
202 | if (err) { | |
203 | DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n", | |
204 | fw_name); | |
205 | release_firmware(adev->pm.fw); | |
206 | adev->pm.fw = NULL; | |
207 | } | |
208 | return err; | |
07845526 HR |
209 | } |
210 | ||
6f47116e EQ |
211 | void smu_v11_0_fini_microcode(struct smu_context *smu) |
212 | { | |
213 | struct amdgpu_device *adev = smu->adev; | |
214 | ||
215 | release_firmware(adev->pm.fw); | |
216 | adev->pm.fw = NULL; | |
217 | adev->pm.fw_version = 0; | |
218 | } | |
219 | ||
6c45e480 | 220 | int smu_v11_0_load_microcode(struct smu_context *smu) |
3d2f5200 | 221 | { |
827440a9 KF |
222 | struct amdgpu_device *adev = smu->adev; |
223 | const uint32_t *src; | |
224 | const struct smc_firmware_header_v1_0 *hdr; | |
225 | uint32_t addr_start = MP1_SRAM; | |
226 | uint32_t i; | |
e8663832 | 227 | uint32_t smc_fw_size; |
827440a9 KF |
228 | uint32_t mp1_fw_flags; |
229 | ||
e7773c1c | 230 | hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; |
827440a9 KF |
231 | src = (const uint32_t *)(adev->pm.fw->data + |
232 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | |
e8663832 | 233 | smc_fw_size = hdr->header.ucode_size_bytes; |
827440a9 | 234 | |
e8663832 | 235 | for (i = 1; i < smc_fw_size/4 - 1; i++) { |
827440a9 KF |
236 | WREG32_PCIE(addr_start, src[i]); |
237 | addr_start += 4; | |
238 | } | |
239 | ||
240 | WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), | |
241 | 1 & MP1_SMN_PUB_CTRL__RESET_MASK); | |
242 | WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), | |
243 | 1 & ~MP1_SMN_PUB_CTRL__RESET_MASK); | |
244 | ||
245 | for (i = 0; i < adev->usec_timeout; i++) { | |
246 | mp1_fw_flags = RREG32_PCIE(MP1_Public | | |
247 | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); | |
248 | if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> | |
249 | MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) | |
250 | break; | |
251 | udelay(1); | |
252 | } | |
253 | ||
254 | if (i == adev->usec_timeout) | |
255 | return -ETIME; | |
256 | ||
3d2f5200 HR |
257 | return 0; |
258 | } | |
259 | ||
6c45e480 | 260 | int smu_v11_0_check_fw_status(struct smu_context *smu) |
e11c4fd5 | 261 | { |
7b0031b6 KW |
262 | struct amdgpu_device *adev = smu->adev; |
263 | uint32_t mp1_fw_flags; | |
264 | ||
a8394cfa HR |
265 | mp1_fw_flags = RREG32_PCIE(MP1_Public | |
266 | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); | |
7b0031b6 KW |
267 | |
268 | if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> | |
269 | MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) | |
270 | return 0; | |
a8394cfa | 271 | |
7b0031b6 | 272 | return -EIO; |
e11c4fd5 HR |
273 | } |
274 | ||
/*
 * Compare the driver's expected SMU driver-interface version against
 * the one reported by the firmware.
 *
 * A mismatch is NOT fatal (see the comment below): the function only
 * warns and still returns 0. Non-zero is returned only if the version
 * query itself fails.
 */
int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	/* Firmware version is packed as major[31:16].minor[15:8].debug[7:0]. */
	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	/* Pick the interface version this driver was built against. */
	switch (smu->adev->asic_type) {
	case CHIP_ARCTURUS:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
		break;
	case CHIP_NAVI10:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
		break;
	case CHIP_NAVI12:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
		break;
	case CHIP_NAVI14:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
		break;
	case CHIP_SIENNA_CICHLID:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
		break;
	default:
		dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
		break;
	}

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering above, we just leave user a warning message instead
	 * of halt driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_driver_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}
330 | ||
b55c83a7 KW |
331 | static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size) |
332 | { | |
333 | struct amdgpu_device *adev = smu->adev; | |
334 | uint32_t ppt_offset_bytes; | |
335 | const struct smc_firmware_header_v2_0 *v2; | |
336 | ||
337 | v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data; | |
338 | ||
339 | ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes); | |
340 | *size = le32_to_cpu(v2->ppt_size_bytes); | |
341 | *table = (uint8_t *)v2 + ppt_offset_bytes; | |
342 | ||
343 | return 0; | |
344 | } | |
345 | ||
e7773c1c CG |
346 | static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, |
347 | uint32_t *size, uint32_t pptable_id) | |
b55c83a7 KW |
348 | { |
349 | struct amdgpu_device *adev = smu->adev; | |
350 | const struct smc_firmware_header_v2_1 *v2_1; | |
351 | struct smc_soft_pptable_entry *entries; | |
352 | uint32_t pptable_count = 0; | |
353 | int i = 0; | |
354 | ||
355 | v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data; | |
356 | entries = (struct smc_soft_pptable_entry *) | |
357 | ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset)); | |
358 | pptable_count = le32_to_cpu(v2_1->pptable_count); | |
359 | for (i = 0; i < pptable_count; i++) { | |
360 | if (le32_to_cpu(entries[i].id) == pptable_id) { | |
361 | *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes)); | |
362 | *size = le32_to_cpu(entries[i].ppt_size_bytes); | |
363 | break; | |
364 | } | |
365 | } | |
366 | ||
367 | if (i == pptable_count) | |
368 | return -EINVAL; | |
369 | ||
370 | return 0; | |
371 | } | |
372 | ||
/*
 * Choose and install the powerplay table: either a driver-provided
 * table embedded in a v2.x SMC firmware image (when the vbios supplies
 * a non-zero pp_table_id), or the table from the vbios itself.
 *
 * Existing power_play_table/size values are preserved if already set
 * (e.g. by a hardcoded override).
 */
int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	/* Driver-side pptable only exists in v2.x firmware headers. */
	if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
		dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
		switch (version_minor) {
		case 0:
			ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
			break;
		case 1:
			ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
							smu->smu_table.boot_values.pp_table_id);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (ret)
			return ret;

	} else {
		/* Fall back to the powerplayinfo data table in the vbios. */
		dev_info(adev->dev, "use vbios provided pptable\n");
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    powerplayinfo);

		ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
					      (uint8_t **)&table);
		if (ret)
			return ret;
		size = atom_table_size;
	}

	/* Do not clobber a table installed earlier (e.g. a hardcoded one). */
	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}
423 | ||
142dec62 KW |
424 | static int smu_v11_0_init_dpm_context(struct smu_context *smu) |
425 | { | |
426 | struct smu_dpm_context *smu_dpm = &smu->smu_dpm; | |
427 | ||
428 | if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0) | |
429 | return -EINVAL; | |
430 | ||
d76c9e24 | 431 | return smu_alloc_dpm_context(smu); |
142dec62 KW |
432 | } |
433 | ||
434 | static int smu_v11_0_fini_dpm_context(struct smu_context *smu) | |
435 | { | |
436 | struct smu_dpm_context *smu_dpm = &smu->smu_dpm; | |
437 | ||
438 | if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0) | |
439 | return -EINVAL; | |
440 | ||
441 | kfree(smu_dpm->dpm_context); | |
95add959 | 442 | kfree(smu_dpm->golden_dpm_context); |
8554e67d CG |
443 | kfree(smu_dpm->dpm_current_power_state); |
444 | kfree(smu_dpm->dpm_request_power_state); | |
142dec62 | 445 | smu_dpm->dpm_context = NULL; |
95add959 | 446 | smu_dpm->golden_dpm_context = NULL; |
142dec62 | 447 | smu_dpm->dpm_context_size = 0; |
8554e67d CG |
448 | smu_dpm->dpm_current_power_state = NULL; |
449 | smu_dpm->dpm_request_power_state = NULL; | |
142dec62 KW |
450 | |
451 | return 0; | |
452 | } | |
453 | ||
/*
 * Allocate the SMC table array plus the CPU-side buffers that shadow
 * the SMU tables (driver pptable, max-sustainable-clocks, overdrive).
 *
 * On any failure the goto chain below unwinds every allocation made so
 * far, in reverse order, and returns the errno.
 */
int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;
	int ret = 0;

	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
			 GFP_KERNEL);
	if (!tables) {
		ret = -ENOMEM;
		goto err0_out;
	}
	smu_table->tables = tables;

	/* ASIC-specific hook fills in per-table sizes/domains. */
	ret = smu_tables_init(smu, tables);
	if (ret)
		goto err1_out;

	ret = smu_v11_0_init_dpm_context(smu);
	if (ret)
		goto err1_out;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err2_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err3_out;
	}

	/* Arcturus does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err4_out;
		}

		/* Pristine copy of the boot-time overdrive settings. */
		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err5_out;
		}
	}

	return 0;

err5_out:
	kfree(smu_table->overdrive_table);
err4_out:
	kfree(smu_table->max_sustainable_clocks);
err3_out:
	kfree(smu_table->driver_pptable);
err2_out:
	smu_v11_0_fini_dpm_context(smu);
err1_out:
	kfree(tables);
err0_out:
	return ret;
}
522 | ||
6c45e480 | 523 | int smu_v11_0_fini_smc_tables(struct smu_context *smu) |
813ce279 KW |
524 | { |
525 | struct smu_table_context *smu_table = &smu->smu_table; | |
142dec62 | 526 | int ret = 0; |
813ce279 | 527 | |
871e5e72 | 528 | if (!smu_table->tables) |
813ce279 KW |
529 | return -EINVAL; |
530 | ||
78eb4a36 EQ |
531 | kfree(smu_table->boot_overdrive_table); |
532 | kfree(smu_table->overdrive_table); | |
533 | kfree(smu_table->max_sustainable_clocks); | |
534 | kfree(smu_table->driver_pptable); | |
535 | smu_table->boot_overdrive_table = NULL; | |
536 | smu_table->overdrive_table = NULL; | |
537 | smu_table->max_sustainable_clocks = NULL; | |
538 | smu_table->driver_pptable = NULL; | |
539 | kfree(smu_table->hardcode_pptable); | |
540 | smu_table->hardcode_pptable = NULL; | |
541 | ||
813ce279 | 542 | kfree(smu_table->tables); |
62b9a88c | 543 | kfree(smu_table->metrics_table); |
9fa1ed5b | 544 | kfree(smu_table->watermarks_table); |
813ce279 | 545 | smu_table->tables = NULL; |
62b9a88c | 546 | smu_table->metrics_table = NULL; |
9fa1ed5b | 547 | smu_table->watermarks_table = NULL; |
62b9a88c | 548 | smu_table->metrics_time = 0; |
813ce279 | 549 | |
142dec62 KW |
550 | ret = smu_v11_0_fini_dpm_context(smu); |
551 | if (ret) | |
552 | return ret; | |
813ce279 | 553 | return 0; |
813ce279 | 554 | } |
8bf16963 | 555 | |
6c45e480 | 556 | int smu_v11_0_init_power(struct smu_context *smu) |
8bf16963 KW |
557 | { |
558 | struct smu_power_context *smu_power = &smu->smu_power; | |
559 | ||
560 | if (smu_power->power_context || smu_power->power_context_size != 0) | |
561 | return -EINVAL; | |
562 | ||
563 | smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context), | |
564 | GFP_KERNEL); | |
565 | if (!smu_power->power_context) | |
566 | return -ENOMEM; | |
567 | smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context); | |
568 | ||
569 | return 0; | |
570 | } | |
571 | ||
6c45e480 | 572 | int smu_v11_0_fini_power(struct smu_context *smu) |
8bf16963 KW |
573 | { |
574 | struct smu_power_context *smu_power = &smu->smu_power; | |
575 | ||
576 | if (!smu_power->power_context || smu_power->power_context_size == 0) | |
577 | return -EINVAL; | |
578 | ||
579 | kfree(smu_power->power_context); | |
580 | smu_power->power_context = NULL; | |
581 | smu_power->power_context_size = 0; | |
582 | ||
583 | return 0; | |
584 | } | |
585 | ||
12ea3449 EQ |
586 | static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev, |
587 | uint8_t clk_id, | |
588 | uint8_t syspll_id, | |
589 | uint32_t *clk_freq) | |
590 | { | |
591 | struct atom_get_smu_clock_info_parameters_v3_1 input = {0}; | |
592 | struct atom_get_smu_clock_info_output_parameters_v3_1 *output; | |
593 | int ret, index; | |
594 | ||
595 | input.clk_id = clk_id; | |
596 | input.syspll_id = syspll_id; | |
597 | input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; | |
598 | index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1, | |
599 | getsmuclockinfo); | |
600 | ||
601 | ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, | |
602 | (uint32_t *)&input); | |
603 | if (ret) | |
604 | return -EINVAL; | |
605 | ||
606 | output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input; | |
607 | *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; | |
608 | ||
609 | return 0; | |
610 | } | |
611 | ||
/*
 * Read boot-time clocks/voltages from the vbios firmwareinfo data
 * table into smu->smu_table.boot_values, then refine the individual
 * clock frequencies via the getsmuclockinfo command table.
 *
 * Only atom_firmware_info format revision 3 is accepted; content
 * revisions 0-2 use the v3_1 layout (no pp_table_id), revision 3+
 * uses v3_3.
 */
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu11\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		/* v3.1 layout: no pplib pptable id field. */
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		/* socclk/dcefclk are filled from getsmuclockinfo below. */
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
	default:
		/* v3.3 layout additionally carries the pplib pptable id. */
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	/* Per-clock queries; failures are deliberately ignored (best effort). */
	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.socclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dcefclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_ECLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.eclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_VCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.vclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dclk);

	/* fclk lives on a different syspll and only exists on v3.2+. */
	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2))
		smu_v11_0_atom_get_smu_clockinfo(smu->adev,
						 (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
						 (uint8_t)SMU11_SYSPLL1_2_ID,
						 &smu->smu_table.boot_values.fclk);

	return 0;
}
704 | ||
6c45e480 | 705 | int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) |
d72e91c5 KW |
706 | { |
707 | struct smu_table_context *smu_table = &smu->smu_table; | |
708 | struct smu_table *memory_pool = &smu_table->memory_pool; | |
709 | int ret = 0; | |
710 | uint64_t address; | |
711 | uint32_t address_low, address_high; | |
712 | ||
713 | if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL) | |
714 | return ret; | |
715 | ||
7a65bdc6 | 716 | address = (uintptr_t)memory_pool->cpu_addr; |
d72e91c5 KW |
717 | address_high = (uint32_t)upper_32_bits(address); |
718 | address_low = (uint32_t)lower_32_bits(address); | |
719 | ||
720 | ret = smu_send_smc_msg_with_param(smu, | |
0914f1c6 | 721 | SMU_MSG_SetSystemVirtualDramAddrHigh, |
1c58267c MC |
722 | address_high, |
723 | NULL); | |
d72e91c5 KW |
724 | if (ret) |
725 | return ret; | |
726 | ret = smu_send_smc_msg_with_param(smu, | |
0914f1c6 | 727 | SMU_MSG_SetSystemVirtualDramAddrLow, |
1c58267c MC |
728 | address_low, |
729 | NULL); | |
d72e91c5 KW |
730 | if (ret) |
731 | return ret; | |
732 | ||
733 | address = memory_pool->mc_address; | |
734 | address_high = (uint32_t)upper_32_bits(address); | |
735 | address_low = (uint32_t)lower_32_bits(address); | |
736 | ||
0914f1c6 | 737 | ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh, |
1c58267c | 738 | address_high, NULL); |
d72e91c5 KW |
739 | if (ret) |
740 | return ret; | |
0914f1c6 | 741 | ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow, |
1c58267c | 742 | address_low, NULL); |
d72e91c5 KW |
743 | if (ret) |
744 | return ret; | |
0914f1c6 | 745 | ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize, |
1c58267c | 746 | (uint32_t)memory_pool->size, NULL); |
d72e91c5 KW |
747 | if (ret) |
748 | return ret; | |
749 | ||
750 | return ret; | |
751 | } | |
752 | ||
/* Populate the SMC pptable by building the default dpm tables. */
int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
	return smu_set_default_dpm_table(smu);
}
761 | ||
6c45e480 | 762 | int smu_v11_0_write_pptable(struct smu_context *smu) |
863651b6 | 763 | { |
2c80abe3 | 764 | struct smu_table_context *table_context = &smu->smu_table; |
863651b6 LG |
765 | int ret = 0; |
766 | ||
0d9d78b5 | 767 | ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0, |
33bd73ae | 768 | table_context->driver_pptable, true); |
863651b6 LG |
769 | |
770 | return ret; | |
771 | } | |
772 | ||
6c45e480 | 773 | int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) |
e73cf108 HR |
774 | { |
775 | int ret; | |
776 | ||
777 | ret = smu_send_smc_msg_with_param(smu, | |
1c58267c | 778 | SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL); |
e73cf108 | 779 | if (ret) |
d9811cfc | 780 | dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!"); |
e73cf108 HR |
781 | |
782 | return ret; | |
783 | } | |
784 | ||
6c45e480 | 785 | int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) |
44619596 | 786 | { |
44619596 LG |
787 | struct smu_table_context *table_context = &smu->smu_table; |
788 | ||
789 | if (!table_context) | |
790 | return -EINVAL; | |
791 | ||
6c45e480 | 792 | return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100); |
44619596 LG |
793 | } |
794 | ||
ce0d0ec3 EQ |
795 | int smu_v11_0_set_driver_table_location(struct smu_context *smu) |
796 | { | |
797 | struct smu_table *driver_table = &smu->smu_table.driver_table; | |
798 | int ret = 0; | |
799 | ||
800 | if (driver_table->mc_address) { | |
801 | ret = smu_send_smc_msg_with_param(smu, | |
802 | SMU_MSG_SetDriverDramAddrHigh, | |
1c58267c MC |
803 | upper_32_bits(driver_table->mc_address), |
804 | NULL); | |
ce0d0ec3 EQ |
805 | if (!ret) |
806 | ret = smu_send_smc_msg_with_param(smu, | |
807 | SMU_MSG_SetDriverDramAddrLow, | |
1c58267c MC |
808 | lower_32_bits(driver_table->mc_address), |
809 | NULL); | |
ce0d0ec3 EQ |
810 | } |
811 | ||
812 | return ret; | |
813 | } | |
814 | ||
6c45e480 | 815 | int smu_v11_0_set_tool_table_location(struct smu_context *smu) |
e88e4f83 LG |
816 | { |
817 | int ret = 0; | |
33bd73ae | 818 | struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG]; |
e88e4f83 LG |
819 | |
820 | if (tool_table->mc_address) { | |
821 | ret = smu_send_smc_msg_with_param(smu, | |
0914f1c6 | 822 | SMU_MSG_SetToolsDramAddrHigh, |
1c58267c MC |
823 | upper_32_bits(tool_table->mc_address), |
824 | NULL); | |
e88e4f83 LG |
825 | if (!ret) |
826 | ret = smu_send_smc_msg_with_param(smu, | |
0914f1c6 | 827 | SMU_MSG_SetToolsDramAddrLow, |
1c58267c MC |
828 | lower_32_bits(tool_table->mc_address), |
829 | NULL); | |
e88e4f83 LG |
830 | } |
831 | ||
832 | return ret; | |
833 | } | |
834 | ||
6c45e480 | 835 | int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) |
56c53ad6 KW |
836 | { |
837 | int ret = 0; | |
a254bfa2 | 838 | |
38748ad8 ML |
839 | if (!smu->pm_enabled) |
840 | return ret; | |
841 | ||
1c58267c | 842 | ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL); |
56c53ad6 KW |
843 | return ret; |
844 | } | |
845 | ||
/*
 * Send the driver's allowed-feature bitmap (64 bits) to the SMU as two
 * 32-bit halves. Holds feature->mutex across the read of feature->allowed
 * and both message sends. Returns 0 on success (note: an empty/short
 * bitmap also returns 0 via the failed label, since ret is still 0).
 */
int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	mutex_lock(&feature->mutex);
	/* Nothing to send if no features are allowed or the map is too small. */
	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
		goto failed;

	/*
	 * NOTE(review): casting a uint32_t[2] to unsigned long * assumes
	 * little-endian and compatible alignment; bitmap_to_arr32 would be
	 * the portable form — confirm against kernel bitmap API.
	 */
	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	/* High dword first, then low dword. */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					  feature_mask[1], NULL);
	if (ret)
		goto failed;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					  feature_mask[0], NULL);
	if (ret)
		goto failed;

failed:
	mutex_unlock(&feature->mutex);
	return ret;
}
873 | ||
/*
 * Read the SMU's enabled-feature mask into feature_mask[0] (low 32 bits)
 * and feature_mask[1] (high 32 bits). If the cached feature->enabled
 * bitmap is empty, the mask is queried from firmware; otherwise the cache
 * is copied out. num must be at least 2 (dwords of output space).
 */
int smu_v11_0_get_enabled_mask(struct smu_context *smu,
			       uint32_t *feature_mask, uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		/* Cache is cold: ask the firmware directly. */
		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
		if (ret)
			return ret;

		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_low;
		feature_mask[1] = feature_mask_high;
	} else {
		/*
		 * NOTE(review): unsigned long * cast over a uint32_t array —
		 * bitmap_to_arr32 would be the portable spelling; verify.
		 */
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			     feature->feature_num);
	}

	return ret;
}
902 | ||
/*
 * Enable or disable all SMU features in one shot. After the firmware call,
 * the cached enabled/supported bitmaps are cleared and — on enable — rebuilt
 * from the mask the firmware reports, so the cache mirrors firmware state.
 */
int smu_v11_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
				     SMU_MSG_DisableAllSmuFeatures), NULL);
	if (ret)
		return ret;

	/* Invalidate the cache regardless of direction. */
	bitmap_zero(feature->enabled, feature->feature_num);
	bitmap_zero(feature->supported, feature->feature_num);

	if (en) {
		/* Re-read what the firmware actually enabled. */
		ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
		if (ret)
			return ret;

		bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
			    feature->feature_num);
		bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
			    feature->feature_num);
	}

	return ret;
}
931 | ||
6c45e480 | 932 | int smu_v11_0_notify_display_change(struct smu_context *smu) |
e1c6f86a KW |
933 | { |
934 | int ret = 0; | |
935 | ||
38748ad8 ML |
936 | if (!smu->pm_enabled) |
937 | return ret; | |
938 | ||
687e8ad0 KF |
939 | if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && |
940 | smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) | |
1c58267c | 941 | ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); |
e1c6f86a KW |
942 | |
943 | return ret; | |
944 | } | |
945 | ||
/*
 * Query the max sustainable frequency for one clock domain. Tries the DC
 * (battery/DC-power) limit first; if the firmware reports 0 for it, falls
 * back to the AC limit. Returns 0 and leaves *clock untouched when the
 * firmware does not implement either query message.
 */
static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	/* Both messages must exist on this ASIC, otherwise silently skip. */
	if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_clk_get_index(smu, clock_select);
	if (clk_id < 0)
		return -EINVAL;

	/* Clock id travels in the upper 16 bits of the message argument. */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}
981 | ||
/*
 * Fill smu_table.max_sustainable_clocks. Defaults come from VBIOS boot
 * values (converted from 10 kHz to MHz by /100) or 0xFFFFFFFF (unlimited);
 * each value is then refined by querying the firmware, but only for clock
 * domains whose DPM feature is enabled. Finally UCLK is capped at SOCCLK.
 */
int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
			smu->smu_table.max_sustainable_clocks;
	int ret = 0;

	/* Seed with boot values / "no limit" placeholders. */
	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	/* The display-related clocks all hang off the DCEFCLK DPM feature. */
	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
			       __func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	/* UCLK cannot usefully exceed SOCCLK. */
	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}
1058 | ||
/*
 * Set the PPT (package power tracking) limit to n watts. n == 0 restores
 * the default limit; values above the board max are rejected. Requires the
 * PPT feature to be enabled in firmware. Caches the new value in
 * smu->power_limit on success.
 */
int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
	int ret = 0;
	uint32_t max_power_limit;

	max_power_limit = smu_get_max_power_limit(smu);

	if (n > max_power_limit) {
		dev_err(smu->adev->dev, "New power limit (%d) is over the max allowed %d\n",
				n,
				max_power_limit);
		return -EINVAL;
	}

	/* 0 is the "reset to default" request. */
	if (n == 0)
		n = smu->default_power_limit;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
		return ret;
	}
	smu->power_limit = n;

	return 0;
}
1090 | ||
/*
 * Read the current frequency of a clock domain into *value (in 10 kHz
 * units: firmware reports MHz, multiplied by 100 here). Uses the
 * GetDpmClockFreq message when the ASIC provides it, otherwise falls back
 * to reading the SmuMetrics table.
 */
int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
				   enum smu_clk_type clk_id,
				   uint32_t *value)
{
	int ret = 0;
	uint32_t freq = 0;
	int asic_clk_id;

	if (clk_id >= SMU_CLK_COUNT || !value)
		return -EINVAL;

	asic_clk_id = smu_clk_get_index(smu, clk_id);
	if (asic_clk_id < 0)
		return -EINVAL;

	/* Without a GetDpmClockFreq message, read the metrics table instead. */
	if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0)
		ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
	else {
		/* Clock id is carried in the upper 16 bits of the argument. */
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
						  (asic_clk_id << 16), &freq);
		if (ret)
			return ret;
	}

	freq *= 100;
	*value = freq;

	return ret;
}
1121 | ||
/*
 * Arm thermal alerting: start from the default smu11 thermal policy, let
 * the ASIC backend refine the range, program the thermal controller (range,
 * IRQ enable, fan table) when one exists, and publish the final range into
 * adev->pm.dpm.thermal for the rest of the driver.
 */
int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;
	struct smu_temperature_range range;
	struct amdgpu_device *adev = smu->adev;

	/* Seed with the generic SMU11 policy before ASIC-specific overrides. */
	memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));

	ret = smu_get_thermal_temperature_range(smu, &range);
	if (ret)
		return ret;

	if (smu->smu_table.thermal_controller_type) {
		ret = smu_set_thermal_range(smu, range);
		if (ret)
			return ret;

		/* Unmask the thermal interrupt source. */
		ret = amdgpu_irq_get(adev, &smu->irq_source, 0);
		if (ret)
			return ret;

		ret = smu_set_thermal_fan_table(smu);
		if (ret)
			return ret;
	}

	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

	return ret;
}
1160 | ||
/* Disarm thermal alerting by dropping the reference on the SMU IRQ source. */
int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}
1165 | ||
77d1eef4 KW |
1166 | static uint16_t convert_to_vddc(uint8_t vid) |
1167 | { | |
1168 | return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE); | |
1169 | } | |
1170 | ||
1171 | static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value) | |
1172 | { | |
1173 | struct amdgpu_device *adev = smu->adev; | |
1174 | uint32_t vdd = 0, val_vid = 0; | |
1175 | ||
1176 | if (!value) | |
1177 | return -EINVAL; | |
1178 | val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) & | |
1179 | SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >> | |
1180 | SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT; | |
1181 | ||
1182 | vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid); | |
1183 | ||
1184 | *value = vdd; | |
1185 | ||
1186 | return 0; | |
1187 | ||
1188 | } | |
1189 | ||
/*
 * Generic SMU11 sensor read. Handles the sensors this layer can service
 * (MCLK/SCLK frequency, GFX voltage, min fan RPM); everything else is
 * delegated to smu_common_read_sensor(). *size is set to the number of
 * bytes written, or 0 on failure.
 */
int smu_v11_0_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	if(!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		/* SMU11 fans can stop completely, so the minimum RPM is 0. */
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		ret = smu_common_read_sensor(smu, sensor, data, size);
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}
1226 | ||
/*
 * Service a display clock request from DC: map the pp clock type to the SMU
 * clock domain and program a hard minimum frequency (kHz input converted to
 * MHz). Only acts when DCEFCLK or UCLK DPM is enabled; a UCLK request is
 * skipped while UCLK switching is disabled, and the last DAL UCLK minimum
 * is cached for later restore.
 */
int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
		smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		/* UCLK switching disabled (e.g. by display config): ignore. */
		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		/* 0 for max means "no upper bound". */
		ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);

		if(clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}
1276 | ||
6c45e480 | 1277 | int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) |
bca32528 KF |
1278 | { |
1279 | int ret = 0; | |
acbcc111 | 1280 | struct amdgpu_device *adev = smu->adev; |
bca32528 | 1281 | |
acbcc111 | 1282 | switch (adev->asic_type) { |
acbcc111 | 1283 | case CHIP_NAVI10: |
ba02636d | 1284 | case CHIP_NAVI14: |
9ea8da75 | 1285 | case CHIP_NAVI12: |
e0da123a | 1286 | case CHIP_SIENNA_CICHLID: |
acbcc111 KF |
1287 | if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) |
1288 | return 0; | |
acbcc111 | 1289 | if (enable) |
1c58267c | 1290 | ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); |
acbcc111 | 1291 | else |
1c58267c | 1292 | ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); |
acbcc111 KF |
1293 | break; |
1294 | default: | |
1295 | break; | |
1296 | } | |
bca32528 KF |
1297 | |
1298 | return ret; | |
1299 | } | |
1300 | ||
6c45e480 | 1301 | uint32_t |
008a9524 CG |
1302 | smu_v11_0_get_fan_control_mode(struct smu_context *smu) |
1303 | { | |
ffcb08df | 1304 | if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) |
008a9524 CG |
1305 | return AMD_FAN_CTRL_MANUAL; |
1306 | else | |
1307 | return AMD_FAN_CTRL_AUTO; | |
1308 | } | |
1309 | ||
/*
 * Toggle the firmware FAN_CONTROL feature on/off. Returns 0 (no-op) when
 * the feature is not supported at all on this board.
 */
static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
		       __func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}
1325 | ||
1326 | static int | |
1327 | smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) | |
1328 | { | |
1329 | struct amdgpu_device *adev = smu->adev; | |
1330 | ||
1331 | WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, | |
1332 | REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), | |
1333 | CG_FDO_CTRL2, TMIN, 0)); | |
1334 | WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, | |
1335 | REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), | |
1336 | CG_FDO_CTRL2, FDO_PWM_MODE, mode)); | |
1337 | ||
1338 | return 0; | |
1339 | } | |
1340 | ||
/*
 * Force the fan to a fixed duty cycle given as a percentage (clamped to
 * 100). Disables firmware auto fan control first, converts the percentage
 * against the FMAX_DUTY100 calibration value, then switches to static PWM
 * mode.
 */
int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;

	/* Manual control requires firmware fan control to be off. */
	if (smu_v11_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	/* 64-bit intermediate avoids overflow in speed * duty100. */
	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
1369 | ||
/*
 * Select the fan control policy: NONE pins the fan at 100%, MANUAL turns
 * firmware auto control off, AUTO turns it on. Unknown modes are ignored
 * (return 0).
 */
int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_set_fan_speed_percent(smu, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}
1397 | ||
/*
 * Force the fan to a fixed RPM. Rejects 0 (would divide by zero in the
 * tach-period computation), disables firmware auto control, programs the
 * tachometer target period from the crystal clock, and enters static-RPM
 * PWM mode.
 */
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;
	uint32_t tach_period, crystal_clock_freq;

	if (!speed)
		return -EINVAL;

	ret = smu_v11_0_auto_fan_control(smu, 0);
	if (ret)
		return ret;

	/* Tach period in units derived from the reference (xclk) frequency. */
	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);

	return ret;
}
1423 | ||
6c45e480 | 1424 | int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, |
e911671c | 1425 | uint32_t pstate) |
1426 | { | |
a1b11201 | 1427 | int ret = 0; |
a1b11201 | 1428 | ret = smu_send_smc_msg_with_param(smu, |
1429 | SMU_MSG_SetXgmiMode, | |
1c58267c MC |
1430 | pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, |
1431 | NULL); | |
a1b11201 | 1432 | return ret; |
e911671c | 1433 | } |
1434 | ||
be80b431 EQ |
1435 | static int smu_v11_0_set_irq_state(struct amdgpu_device *adev, |
1436 | struct amdgpu_irq_src *source, | |
1437 | unsigned tyep, | |
1438 | enum amdgpu_interrupt_state state) | |
1439 | { | |
1440 | uint32_t val = 0; | |
1441 | ||
1442 | switch (state) { | |
1443 | case AMDGPU_IRQ_STATE_DISABLE: | |
1444 | /* For THM irqs */ | |
1445 | val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); | |
1446 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1); | |
1447 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1); | |
1448 | WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); | |
1449 | ||
1450 | WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0); | |
1451 | ||
1452 | /* For MP1 SW irqs */ | |
1453 | val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL); | |
1454 | val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); | |
1455 | WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val); | |
1456 | ||
1457 | break; | |
1458 | case AMDGPU_IRQ_STATE_ENABLE: | |
1459 | /* For THM irqs */ | |
1460 | val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); | |
1461 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); | |
1462 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); | |
1463 | WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); | |
1464 | ||
1465 | val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT); | |
1466 | val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); | |
1467 | val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); | |
1468 | WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val); | |
1469 | ||
1470 | /* For MP1 SW irqs */ | |
1471 | val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT); | |
1472 | val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); | |
1473 | val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); | |
1474 | WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT, val); | |
1475 | ||
1476 | val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL); | |
1477 | val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); | |
1478 | WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val); | |
1479 | ||
1480 | break; | |
1481 | default: | |
1482 | break; | |
1483 | } | |
1484 | ||
1485 | return 0; | |
1486 | } | |
1487 | ||
/* Re-arm the AC/DC power-source-switch interrupt after handling one. */
static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
	return smu_send_smc_msg(smu,
				SMU_MSG_ReenableAcDcInterrupt,
				NULL);
}
1494 | ||
5e6d2665 KW |
1495 | #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ |
1496 | #define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ | |
1497 | ||
e528ccf9 EQ |
1498 | #define SMUIO_11_0__SRCID__SMUIO_GPIO19 83 |
1499 | ||
5e6d2665 KW |
1500 | static int smu_v11_0_irq_process(struct amdgpu_device *adev, |
1501 | struct amdgpu_irq_src *source, | |
1502 | struct amdgpu_iv_entry *entry) | |
1503 | { | |
bcdc7c05 | 1504 | struct smu_context *smu = &adev->smu; |
5e6d2665 KW |
1505 | uint32_t client_id = entry->client_id; |
1506 | uint32_t src_id = entry->src_id; | |
cd598d6c EQ |
1507 | /* |
1508 | * ctxid is used to distinguish different | |
1509 | * events for SMCToHost interrupt. | |
1510 | */ | |
1511 | uint32_t ctxid = entry->src_data[0]; | |
d559aba8 | 1512 | uint32_t data; |
5e6d2665 KW |
1513 | |
1514 | if (client_id == SOC15_IH_CLIENTID_THM) { | |
1515 | switch (src_id) { | |
1516 | case THM_11_0__SRCID__THM_DIG_THERM_L2H: | |
27a468ea | 1517 | dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n"); |
94952205 EQ |
1518 | /* |
1519 | * SW CTF just occurred. | |
1520 | * Try to do a graceful shutdown to prevent further damage. | |
1521 | */ | |
27a468ea | 1522 | dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); |
94952205 | 1523 | orderly_poweroff(true); |
5e6d2665 KW |
1524 | break; |
1525 | case THM_11_0__SRCID__THM_DIG_THERM_H2L: | |
27a468ea | 1526 | dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); |
5e6d2665 KW |
1527 | break; |
1528 | default: | |
27a468ea EQ |
1529 | dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n", |
1530 | src_id); | |
5e6d2665 | 1531 | break; |
5e6d2665 | 1532 | } |
e528ccf9 | 1533 | } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) { |
27a468ea | 1534 | dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n"); |
e528ccf9 EQ |
1535 | /* |
1536 | * HW CTF just occurred. Shutdown to prevent further damage. | |
1537 | */ | |
27a468ea | 1538 | dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n"); |
e528ccf9 | 1539 | orderly_poweroff(true); |
e1188aac | 1540 | } else if (client_id == SOC15_IH_CLIENTID_MP1) { |
cd598d6c | 1541 | if (src_id == 0xfe) { |
d559aba8 EQ |
1542 | /* ACK SMUToHost interrupt */ |
1543 | data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL); | |
1544 | data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1); | |
1545 | WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data); | |
1546 | ||
cd598d6c EQ |
1547 | switch (ctxid) { |
1548 | case 0x3: | |
1549 | dev_dbg(adev->dev, "Switched to AC mode!\n"); | |
1550 | smu_v11_0_ack_ac_dc_interrupt(&adev->smu); | |
1551 | break; | |
1552 | case 0x4: | |
1553 | dev_dbg(adev->dev, "Switched to DC mode!\n"); | |
1554 | smu_v11_0_ack_ac_dc_interrupt(&adev->smu); | |
1555 | break; | |
bcdc7c05 | 1556 | case 0x7: |
b265bdbd EQ |
1557 | if (!atomic_read(&adev->throttling_logging_enabled)) |
1558 | return 0; | |
1559 | ||
1560 | if (__ratelimit(&adev->throttling_logging_rs)) | |
6961750f | 1561 | schedule_work(&smu->throttling_logging_work); |
bcdc7c05 EQ |
1562 | |
1563 | break; | |
cd598d6c EQ |
1564 | } |
1565 | } | |
5e6d2665 KW |
1566 | } |
1567 | ||
1568 | return 0; | |
1569 | } | |
1570 | ||
/* IRQ-source vtable shared by the THM, SMUIO-CTF and MP1 registrations below. */
static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
	.set = smu_v11_0_set_irq_state,
	.process = smu_v11_0_irq_process,
};
1576 | ||
/*
 * Register smu->irq_source for all SMU interrupt paths: THM over/under
 * temperature, the HW CTF GPIO19 line on the ROM_SMUIO client, and the MP1
 * SMCToHost software interrupt (src id 0xfe).
 */
int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				0xfe,
				irq_src);
	if (ret)
		return ret;

	return ret;
}
1613 | ||
/*
 * Export the cached max sustainable clocks to the display core, converted
 * from MHz to kHz. Clocks this layer does not track (DSC/DPP/fabric) are
 * reported as 0.
 */
int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
			(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
			(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
			(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
			(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
			(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
			(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}
1643 | ||
6c45e480 | 1644 | int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) |
26e2b581 | 1645 | { |
1646 | int ret = 0; | |
1647 | ||
1c58267c | 1648 | ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL); |
26e2b581 | 1649 | |
1650 | return ret; | |
1651 | } | |
1652 | ||
/*
 * Send SMU_MSG_ArmD3 with @baco_seq as the argument — arms the firmware with
 * the BACO/D3 sequence to use on the next D3 transition.  Returns the SMU
 * message result (0 on success).
 */
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
	return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
1657 | ||
6c45e480 | 1658 | bool smu_v11_0_baco_is_support(struct smu_context *smu) |
767acabd | 1659 | { |
767acabd | 1660 | struct smu_baco_context *smu_baco = &smu->smu_baco; |
767acabd KW |
1661 | bool baco_support; |
1662 | ||
1663 | mutex_lock(&smu_baco->mutex); | |
1664 | baco_support = smu_baco->platform_support; | |
1665 | mutex_unlock(&smu_baco->mutex); | |
1666 | ||
1667 | if (!baco_support) | |
1668 | return false; | |
1669 | ||
0a650c1d EQ |
1670 | /* Arcturus does not support this bit mask */ |
1671 | if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && | |
1672 | !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) | |
767acabd KW |
1673 | return false; |
1674 | ||
49e78c82 | 1675 | return true; |
767acabd KW |
1676 | } |
1677 | ||
6c45e480 | 1678 | enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) |
767acabd KW |
1679 | { |
1680 | struct smu_baco_context *smu_baco = &smu->smu_baco; | |
a13362c1 | 1681 | enum smu_baco_state baco_state; |
767acabd KW |
1682 | |
1683 | mutex_lock(&smu_baco->mutex); | |
1684 | baco_state = smu_baco->state; | |
1685 | mutex_unlock(&smu_baco->mutex); | |
1686 | ||
1687 | return baco_state; | |
1688 | } | |
1689 | ||
/*
 * smu_v11_0_baco_set_state - drive the ASIC into or out of BACO.
 *
 * The entry/exit message sequence differs depending on whether RAS is
 * supported on this device; the cached state is only updated when the whole
 * sequence succeeded.  Returns 0 on success or the first failing SMU message
 * result.
 */
int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	uint32_t data;
	int ret = 0;

	/* already in the requested state: nothing to do */
	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

	mutex_lock(&smu_baco->mutex);

	if (state == SMU_BACO_STATE_ENTER) {
		if (!ras || !ras->supported) {
			/* set bit 31 of THM_BACO_CNTL before EnterBaco(0) —
			 * NOTE(review): presumably the BACO sequence trigger;
			 * confirm against the THM register spec. */
			data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
			data |= 0x80000000;
			WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);

			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
		} else {
			/* RAS-capable parts use EnterBaco arg 1 and skip the
			 * register write */
			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
		}
	} else {
		ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
		if (ret)
			goto out;

		if (ras && ras->supported) {
			ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
			if (ret)
				goto out;
		}

		/* clear vbios scratch 6 and 7 for coming asic reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);
	}
	if (ret)
		goto out;

	/* record the new state only after the sequence succeeded */
	smu_baco->state = state;
out:
	mutex_unlock(&smu_baco->mutex);
	return ret;
}
1736 | ||
11520f27 | 1737 | int smu_v11_0_baco_enter(struct smu_context *smu) |
767acabd | 1738 | { |
0a650c1d | 1739 | struct amdgpu_device *adev = smu->adev; |
767acabd KW |
1740 | int ret = 0; |
1741 | ||
0a650c1d EQ |
1742 | /* Arcturus does not need this audio workaround */ |
1743 | if (adev->asic_type != CHIP_ARCTURUS) { | |
1744 | ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); | |
1745 | if (ret) | |
1746 | return ret; | |
1747 | } | |
767acabd KW |
1748 | |
1749 | ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); | |
1750 | if (ret) | |
1751 | return ret; | |
1752 | ||
1753 | msleep(10); | |
1754 | ||
11520f27 AD |
1755 | return ret; |
1756 | } | |
1757 | ||
1758 | int smu_v11_0_baco_exit(struct smu_context *smu) | |
1759 | { | |
1760 | int ret = 0; | |
1761 | ||
767acabd KW |
1762 | ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT); |
1763 | if (ret) | |
1764 | return ret; | |
1765 | ||
1766 | return ret; | |
1767 | } | |
1768 | ||
6c45e480 | 1769 | int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, |
eee3258e PL |
1770 | uint32_t *min, uint32_t *max) |
1771 | { | |
1772 | int ret = 0, clk_id = 0; | |
1773 | uint32_t param = 0; | |
1774 | ||
eee3258e PL |
1775 | clk_id = smu_clk_get_index(smu, clk_type); |
1776 | if (clk_id < 0) { | |
1777 | ret = -EINVAL; | |
1778 | goto failed; | |
1779 | } | |
1780 | param = (clk_id & 0xffff) << 16; | |
1781 | ||
1782 | if (max) { | |
1c58267c | 1783 | ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max); |
eee3258e PL |
1784 | if (ret) |
1785 | goto failed; | |
1786 | } | |
1787 | ||
1788 | if (min) { | |
1c58267c | 1789 | ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min); |
eee3258e PL |
1790 | if (ret) |
1791 | goto failed; | |
1792 | } | |
1793 | ||
1794 | failed: | |
eee3258e PL |
1795 | return ret; |
1796 | } | |
1797 | ||
6c45e480 | 1798 | int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, |
4045f36f PL |
1799 | uint32_t min, uint32_t max) |
1800 | { | |
1801 | int ret = 0, clk_id = 0; | |
1802 | uint32_t param; | |
1803 | ||
1804 | clk_id = smu_clk_get_index(smu, clk_type); | |
1805 | if (clk_id < 0) | |
1806 | return clk_id; | |
1807 | ||
1808 | if (max > 0) { | |
1809 | param = (uint32_t)((clk_id << 16) | (max & 0xffff)); | |
1810 | ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, | |
1c58267c | 1811 | param, NULL); |
4045f36f PL |
1812 | if (ret) |
1813 | return ret; | |
1814 | } | |
1815 | ||
1816 | if (min > 0) { | |
1817 | param = (uint32_t)((clk_id << 16) | (min & 0xffff)); | |
1818 | ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, | |
1c58267c | 1819 | param, NULL); |
4045f36f PL |
1820 | if (ret) |
1821 | return ret; | |
1822 | } | |
1823 | ||
1824 | return ret; | |
1825 | } | |
1826 | ||
6c45e480 | 1827 | int smu_v11_0_override_pcie_parameters(struct smu_context *smu) |
372120f0 KF |
1828 | { |
1829 | struct amdgpu_device *adev = smu->adev; | |
1830 | uint32_t pcie_gen = 0, pcie_width = 0; | |
1831 | int ret; | |
1832 | ||
1833 | if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) | |
1834 | pcie_gen = 3; | |
1835 | else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) | |
1836 | pcie_gen = 2; | |
1837 | else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) | |
1838 | pcie_gen = 1; | |
1839 | else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) | |
1840 | pcie_gen = 0; | |
1841 | ||
1842 | /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1 | |
1843 | * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 | |
1844 | * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 | |
1845 | */ | |
1846 | if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) | |
1847 | pcie_width = 6; | |
1848 | else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) | |
1849 | pcie_width = 5; | |
1850 | else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) | |
1851 | pcie_width = 4; | |
1852 | else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) | |
1853 | pcie_width = 3; | |
1854 | else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) | |
1855 | pcie_width = 2; | |
1856 | else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) | |
1857 | pcie_width = 1; | |
1858 | ||
1859 | ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); | |
1860 | ||
1861 | if (ret) | |
d9811cfc | 1862 | dev_err(adev->dev, "[%s] Attempt to override pcie params failed!\n", __func__); |
372120f0 KF |
1863 | |
1864 | return ret; | |
1865 | ||
1866 | } | |
21677d08 | 1867 | |
337443d0 AD |
1868 | int smu_v11_0_set_performance_level(struct smu_context *smu, |
1869 | enum amd_dpm_forced_level level) | |
1870 | { | |
1871 | int ret = 0; | |
1872 | uint32_t sclk_mask, mclk_mask, soc_mask; | |
1873 | ||
1874 | switch (level) { | |
1875 | case AMD_DPM_FORCED_LEVEL_HIGH: | |
1876 | ret = smu_force_dpm_limit_value(smu, true); | |
1877 | break; | |
1878 | case AMD_DPM_FORCED_LEVEL_LOW: | |
1879 | ret = smu_force_dpm_limit_value(smu, false); | |
1880 | break; | |
1881 | case AMD_DPM_FORCED_LEVEL_AUTO: | |
1882 | case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: | |
1883 | ret = smu_unforce_dpm_levels(smu); | |
1884 | break; | |
1885 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: | |
1886 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: | |
1887 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: | |
1888 | ret = smu_get_profiling_clk_mask(smu, level, | |
1889 | &sclk_mask, | |
1890 | &mclk_mask, | |
1891 | &soc_mask); | |
1892 | if (ret) | |
1893 | return ret; | |
1894 | smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); | |
1895 | smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); | |
1896 | smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); | |
1897 | break; | |
1898 | case AMD_DPM_FORCED_LEVEL_MANUAL: | |
1899 | case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: | |
1900 | default: | |
1901 | break; | |
1902 | } | |
1903 | return ret; | |
1904 | } | |
1905 | ||
f8c83215 AD |
1906 | int smu_v11_0_set_power_source(struct smu_context *smu, |
1907 | enum smu_power_src_type power_src) | |
1908 | { | |
1909 | int pwr_source; | |
1910 | ||
1911 | pwr_source = smu_power_get_index(smu, (uint32_t)power_src); | |
1912 | if (pwr_source < 0) | |
1913 | return -EINVAL; | |
1914 | ||
1915 | return smu_send_smc_msg_with_param(smu, | |
1916 | SMU_MSG_NotifyPowerSource, | |
1917 | pwr_source, | |
1918 | NULL); | |
1919 | } | |
1920 |