/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#define SMU_13_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
#include "smu_cmn.h"

#include "asic_reg/thm/thm_13_0_2_offset.h"
#include "asic_reg/thm/thm_13_0_2_sh_mask.h"
#include "asic_reg/mp/mp_13_0_2_offset.h"
#include "asic_reg/mp/mp_13_0_2_sh_mask.h"
#include "asic_reg/smuio/smuio_13_0_2_offset.h"
#include "asic_reg/smuio/smuio_13_0_2_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");

#define SMU13_VOLTAGE_SCALE 4

#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500	/* 500 ms */

#define LINK_WIDTH_MAX 6
#define LINK_SPEED_MAX 3

#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define smnPCIE_LC_SPEED_CNTL 0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE

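/*
 * Decode tables for the PCIE_LC_* register fields above, indexed by the raw
 * LC_LINK_WIDTH_RD / LC_CURRENT_DATA_RATE field values. The entries appear
 * to be lane counts and link speeds in 0.1 GT/s units (25 = 2.5 GT/s Gen1
 * ... 160 = 16 GT/s Gen4); they are consumed by the PCIe link width/speed
 * reporting helpers elsewhere in the driver (not part of this excerpt).
 */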
static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static int link_speed[] = {25, 50, 80, 160};

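/*
 * Fetch the SMC firmware image from userspace (request_firmware), validate
 * its header and, when firmware loading is handled by the PSP, register the
 * image in the common ucode list so the PSP front-loads it for us.
 */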
int smu_v13_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	switch (adev->asic_type) {
	case CHIP_ALDEBARAN:
		chip_name = "aldebaran";
		break;
	default:
		dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
		return -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v13_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

void smu_v13_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;
	adev->pm.fw_version = 0;
}

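/*
 * Direct MP1-SRAM upload path. The body is compiled out (#if 0): on these
 * parts the SMC image is loaded through the PSP instead (see
 * smu_v13_0_init_microcode above), so this function currently just reports
 * success.
 */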
int smu_v13_0_load_microcode(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = hdr->header.ucode_size_bytes;

	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;
#endif
	return 0;
}

int smu_v13_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

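/*
 * The 32-bit SMU firmware version is packed as major.minor.debug:
 * bits 31:16 hold the major version, bits 15:8 the minor version and
 * bits 7:0 the debug revision, matching the shifts below.
 */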
int smu_v13_0_check_fw_version(struct smu_context *smu)
{
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	switch (smu->adev->asic_type) {
	case CHIP_ALDEBARAN:
		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
		break;
	default:
		dev_err(smu->adev->dev, "smu unsupported ASIC type: %d\n", smu->adev->asic_type);
		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
		break;
	}

	/*
	 * 1. An if_version mismatch is not critical as our firmware is
	 *    designed to be backward compatible.
	 * 2. New firmware usually brings some optimizations. But those are
	 *    visible only with the paired driver.
	 * Considering the above, we just leave the user a warning message
	 * instead of halting driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			 "smu fw version = 0x%08x (%d.%d.%d)\n",
			 smu->smc_driver_if_version, if_version,
			 smu_version, smu_major, smu_minor, smu_debug);
		dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

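/*
 * A v2.1 SMC firmware header carries an array of "soft" powerplay tables
 * appended to the image. Walk the entry list and hand back a pointer into
 * the firmware blob (no copy is made) for the entry matching pptable_id.
 */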
static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

int smu_v13_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	/* temporarily hardcoded */
	smu->smu_table.boot_values.pp_table_id = 3000;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
		dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
		switch (version_minor) {
		case 1:
			ret = smu_v13_0_set_pptable_v2_1(smu, &table, &size,
							 smu->smu_table.boot_values.pp_table_id);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (ret)
			return ret;

	} else {
		dev_info(adev->dev, "use vbios provided pptable\n");
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    powerplayinfo);

		ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
						     (uint8_t **)&table);
		if (ret)
			return ret;
		size = atom_table_size;
	}

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

int smu_v13_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_13_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Aldebaran does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}
	}

	return 0;

err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

int smu_v13_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	smu_table->gpu_metrics_table = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v13_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);

	return 0;
}

int smu_v13_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

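/*
 * Query a bootup clock from the VBIOS via the getsmuclockinfo command
 * table. The atom interpreter reuses the input buffer for the output
 * parameters, and the returned frequency is converted from Hz to the
 * 10 kHz units used by boot_values (divide by 10000).
 */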
static int smu_v13_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
					    uint8_t clk_id,
					    uint8_t syspll_id,
					    uint32_t *clk_freq)
{
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
	int ret, index;

	input.clk_id = clk_id;
	input.syspll_id = syspll_id;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	*clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	return 0;
}

int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_4 *v_3_4;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					     (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu13!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		break;
	case 4:
	default:
		v_3_4 = (struct atom_firmware_info_v3_4 *)header;
		smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
		break;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	smu_v13_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.socclk);

	smu_v13_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dcefclk);

	smu_v13_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_ECLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.eclk);

	smu_v13_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_VCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.vclk);

	smu_v13_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dclk);

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2))
		smu_v13_0_atom_get_smu_clockinfo(smu->adev,
						 (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
						 (uint8_t)SMU11_SYSPLL1_2_ID,
						 &smu->smu_table.boot_values.fclk);

	return 0;
}

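/*
 * SMU mailbox parameters are 32 bits wide, so 64-bit addresses are always
 * passed as a High/Low message pair. Tell the SMU both the CPU virtual
 * address and the GPU (MC) address of the memory pool used for DRAM
 * logging, plus the pool size.
 */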
int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetSystemVirtualDramAddrHigh,
					      address_high,
					      NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetSystemVirtualDramAddrLow,
					      address_low,
					      NULL);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					      address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					      address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
	if (ret)
		dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK failed!");

	return ret;
}

int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetDriverDramAddrLow,
							      lower_32_bits(driver_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v13_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetToolsDramAddrLow,
							      lower_32_bits(tool_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);

	return ret;
}

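/*
 * The allowed-feature bitmap is 64 bits wide on the SMU side; split it
 * into two 32-bit halves and send them with the MaskHigh/MaskLow message
 * pair, all under the feature mutex.
 */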
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	mutex_lock(&feature->mutex);
	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
		goto failed;

	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					      feature_mask[1], NULL);
	if (ret)
		goto failed;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					      feature_mask[0], NULL);
	if (ret)
		goto failed;

failed:
	mutex_unlock(&feature->mutex);
	return ret;
}

int smu_v13_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					 SMU_MSG_DisableAllSmuFeatures), NULL);
	if (ret)
		return ret;

	bitmap_zero(feature->enabled, feature->feature_num);
	bitmap_zero(feature->supported, feature->feature_num);

	if (en) {
		ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
		if (ret)
			return ret;

		bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
			    feature->feature_num);
		bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
			    feature->feature_num);
	}

	return ret;
}

int smu_v13_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);

	return ret;
}

static int
smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}

int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_13_0_max_sustainable_clocks *max_sustainable_clocks =
		smu->smu_table.max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

int smu_v13_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetPptLimit,
					      power_src << 16,
					      power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}

int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
		return ret;
	}

	smu->current_power_limit = n;

	return 0;
}

int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
	if (smu->smu_table.thermal_controller_type)
		return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);

	return 0;
}

int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
{
	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

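/*
 * SVI telemetry reports the core voltage as an 8-bit VID code. The math
 * below assumes the usual SVI2 encoding VDDC = 1.55 V - VID * 6.25 mV:
 * (6200 - vid * 25) is the voltage in 0.25 mV steps, and dividing by
 * SMU13_VOLTAGE_SCALE (4) yields millivolts.
 */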
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU13_VOLTAGE_SCALE);
}

int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, regSMUSVI0_TEL_PLANE0) &
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}

int
smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v13_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
			__func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v13_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, regCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

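/*
 * Manual fan control: FMAX_DUTY100 holds the register value that maps to
 * 100% PWM, so the requested percentage is rescaled as
 * duty = speed * duty100 / 100 before being programmed as a static duty
 * cycle, with automatic fan control disabled first.
 */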
int
smu_v13_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;

	if (smu_v13_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, regCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

int
smu_v13_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v13_0_set_fan_speed_percent(smu, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v13_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v13_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

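/*
 * RPM-based fan control: the tach period is derived from the reference
 * clock as tach_period = 60 * xclk * 10000 / (8 * rpm). This assumes
 * amdgpu_asic_get_xclk() reports the clock in 10 kHz units (hence the
 * * 10000 to reach Hz) and that the controller counts 8 tach edges per
 * fan revolution; a zero RPM request is rejected to avoid dividing by
 * zero.
 */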
int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;
	uint32_t tach_period, crystal_clock_freq;

	if (!speed)
		return -EINVAL;

	ret = smu_v13_0_auto_fan_control(smu, 0);
	if (ret)
		return ret;

	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	ret = smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);

	return ret;
}

int smu_v13_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetXgmiMode,
					      pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
					      NULL);
	return ret;
}

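/*
 * Enable/disable the SMU interrupt sources: the THM thermal-range
 * interrupts (with the alert window, in degrees Celsius, programmed into
 * DIG_THERM_INTH/INTL) and the MP1 SMC-to-host software interrupt
 * (src id 0xFE, handled in smu_v13_0_irq_process below).
 */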
static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   unsigned type,
				   enum amdgpu_interrupt_state state)
{
	struct smu_context *smu = &adev->smu;
	uint32_t low, high;
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For THM irqs */
		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For THM irqs */
		low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			  smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
		high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
			   smu->thermal_range.software_shutdown_temp);

		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
		val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}

static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_ReenableAcDcInterrupt,
				    NULL);
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
#define SMUIO_11_0__SRCID__SMUIO_GPIO19		83

static int smu_v13_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = &adev->smu;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;
	/*
	 * ctxid is used to distinguish different
	 * events for SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			dev_emerg(adev->dev, "ERROR: GPU over temperature range (SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
			orderly_poweroff(true);
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			break;
		default:
			dev_emerg(adev->dev, "ERROR: unknown THM interrupt src id (%d)\n",
				  src_id);
			break;
		}
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault (aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == 0xfe) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);

			switch (ctxid) {
			case 0x3:
				dev_dbg(adev->dev, "Switched to AC mode!\n");
				smu_v13_0_ack_ac_dc_interrupt(&adev->smu);
				break;
			case 0x4:
				dev_dbg(adev->dev, "Switched to DC mode!\n");
				smu_v13_0_ack_ac_dc_interrupt(&adev->smu);
				break;
			case 0x7:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				if (__ratelimit(&adev->throttling_logging_rs))
					schedule_work(&smu->throttling_logging_work);

				break;
			}
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs =
{
	.set = smu_v13_0_set_irq_state,
	.process = smu_v13_0_irq_process,
};

int smu_v13_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v13_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				0xfe,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);

	return ret;
}

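/*
 * PMFW versions are compared in the packed major.minor.debug form
 * described above, so 0x00440700 stands for 68.07.0 (0x44 = 68,
 * 0x07 = 7): newer firmware routes mode-1/mode-2 reset through
 * SMU_MSG_GfxDeviceDriverReset, while older firmware only knows the
 * dedicated SMU_MSG_Mode1Reset message.
 */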
int smu_v13_0_mode1_reset(struct smu_context *smu)
{
	u32 smu_version;
	int ret = 0;

	/*
	 * PM FW supports SMU_MSG_GfxDeviceDriverReset since 68.07.
	 */
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if (smu_version < 0x00440700)
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
	else
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_1, NULL);

	if (!ret)
		msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);

	return ret;
}

int smu_v13_0_mode2_reset(struct smu_context *smu)
{
	u32 smu_version;
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if (smu_version >= 0x00440700)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
	else
		dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n", smu_version);

	/* TODO: mode2 reset wait time should be shorter, will modify it later */
	if (!ret)
		msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);

	return ret;
}

int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

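/*
 * For all the Set{Soft,Hard}{Min,Max}ByFreq messages below, the 32-bit
 * argument packs the clock id in the upper 16 bits and the frequency in
 * MHz in the lower 16 bits; a zero min or max leaves that bound
 * untouched. GFXCLK changes are wrapped in amdgpu_gfx_off_ctrl() so the
 * message is not sent while the GFX core is in gfxoff.
 */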
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (clk_type == SMU_GFXCLK)
		amdgpu_gfx_off_ctrl(adev, false);

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

out:
	if (clk_type == SMU_GFXCLK)
		amdgpu_gfx_off_ctrl(adev, true);

	return ret;
}

int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

1543 | int smu_v13_0_set_performance_level(struct smu_context *smu, | |
1544 | enum amd_dpm_forced_level level) | |
1545 | { | |
1546 | struct smu_13_0_dpm_context *dpm_context = | |
1547 | smu->smu_dpm.dpm_context; | |
1548 | struct smu_13_0_dpm_table *gfx_table = | |
1549 | &dpm_context->dpm_tables.gfx_table; | |
1550 | struct smu_13_0_dpm_table *mem_table = | |
1551 | &dpm_context->dpm_tables.uclk_table; | |
1552 | struct smu_13_0_dpm_table *soc_table = | |
1553 | &dpm_context->dpm_tables.soc_table; | |
1554 | struct smu_umd_pstate_table *pstate_table = | |
1555 | &smu->pstate_table; | |
1556 | struct amdgpu_device *adev = smu->adev; | |
1557 | uint32_t sclk_min = 0, sclk_max = 0; | |
1558 | uint32_t mclk_min = 0, mclk_max = 0; | |
1559 | uint32_t socclk_min = 0, socclk_max = 0; | |
1560 | int ret = 0; | |
1561 | ||
1562 | switch (level) { | |
1563 | case AMD_DPM_FORCED_LEVEL_HIGH: | |
1564 | sclk_min = sclk_max = gfx_table->max; | |
1565 | mclk_min = mclk_max = mem_table->max; | |
1566 | socclk_min = socclk_max = soc_table->max; | |
1567 | break; | |
1568 | case AMD_DPM_FORCED_LEVEL_LOW: | |
1569 | sclk_min = sclk_max = gfx_table->min; | |
1570 | mclk_min = mclk_max = mem_table->min; | |
1571 | socclk_min = socclk_max = soc_table->min; | |
1572 | break; | |
1573 | case AMD_DPM_FORCED_LEVEL_AUTO: | |
1574 | sclk_min = gfx_table->min; | |
1575 | sclk_max = gfx_table->max; | |
1576 | mclk_min = mem_table->min; | |
1577 | mclk_max = mem_table->max; | |
1578 | socclk_min = soc_table->min; | |
1579 | socclk_max = soc_table->max; | |
1580 | break; | |
1581 | case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: | |
1582 | sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; | |
1583 | mclk_min = mclk_max = pstate_table->uclk_pstate.standard; | |
1584 | socclk_min = socclk_max = pstate_table->socclk_pstate.standard; | |
1585 | break; | |
1586 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: | |
1587 | sclk_min = sclk_max = pstate_table->gfxclk_pstate.min; | |
1588 | break; | |
1589 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: | |
1590 | mclk_min = mclk_max = pstate_table->uclk_pstate.min; | |
1591 | break; | |
1592 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: | |
1593 | sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak; | |
1594 | mclk_min = mclk_max = pstate_table->uclk_pstate.peak; | |
1595 | socclk_min = socclk_max = pstate_table->socclk_pstate.peak; | |
1596 | break; | |
1597 | case AMD_DPM_FORCED_LEVEL_MANUAL: | |
1598 | case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: | |
1599 | return 0; | |
1600 | default: | |
1601 | dev_err(adev->dev, "Invalid performance level %d\n", level); | |
1602 | return -EINVAL; | |
1603 | } | |
1604 | ||
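| /* | |
| * Separate MCLK and SOCCLK soft limit settings are not supported | |
| * here, so leave them unset and only program the GFXCLK range. | |
| */ | |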
1605 | mclk_min = mclk_max = 0; | |
1606 | socclk_min = socclk_max = 0; | |
1607 | ||
1608 | if (sclk_min && sclk_max) { | |
1609 | ret = smu_v13_0_set_soft_freq_limited_range(smu, | |
1610 | SMU_GFXCLK, | |
1611 | sclk_min, | |
1612 | sclk_max); | |
1613 | if (ret) | |
1614 | return ret; | |
1615 | } | |
1616 | ||
1617 | if (mclk_min && mclk_max) { | |
1618 | ret = smu_v13_0_set_soft_freq_limited_range(smu, | |
1619 | SMU_MCLK, | |
1620 | mclk_min, | |
1621 | mclk_max); | |
1622 | if (ret) | |
1623 | return ret; | |
1624 | } | |
1625 | ||
1626 | if (socclk_min && socclk_max) { | |
1627 | ret = smu_v13_0_set_soft_freq_limited_range(smu, | |
1628 | SMU_SOCCLK, | |
1629 | socclk_min, | |
1630 | socclk_max); | |
1631 | if (ret) | |
1632 | return ret; | |
1633 | } | |
1634 | ||
1635 | return ret; | |
1636 | } | |
1637 | ||
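| /* Notify the SMU firmware of the active power source (AC or DC). */ | |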
1638 | int smu_v13_0_set_power_source(struct smu_context *smu, | |
1639 | enum smu_power_src_type power_src) | |
1640 | { | |
1641 | int pwr_source; | |
1642 | ||
1643 | pwr_source = smu_cmn_to_asic_specific_index(smu, | |
1644 | CMN2ASIC_MAPPING_PWR, | |
1645 | (uint32_t)power_src); | |
1646 | if (pwr_source < 0) | |
1647 | return -EINVAL; | |
1648 | ||
1649 | return smu_cmn_send_smc_msg_with_param(smu, | |
1650 | SMU_MSG_NotifyPowerSource, | |
1651 | pwr_source, | |
1652 | NULL); | |
1653 | } | |
1654 | ||
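| /* | |
| * Query the clock frequency in MHz of a single DPM level from the | |
| * SMU firmware. | |
| */ | |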
1655 | int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, | |
1656 | enum smu_clk_type clk_type, | |
1657 | uint16_t level, | |
1658 | uint32_t *value) | |
1659 | { | |
1660 | int ret = 0, clk_id = 0; | |
1661 | uint32_t param; | |
1662 | ||
1663 | if (!value) | |
1664 | return -EINVAL; | |
1665 | ||
1666 | if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) | |
1667 | return 0; | |
1668 | ||
1669 | clk_id = smu_cmn_to_asic_specific_index(smu, | |
1670 | CMN2ASIC_MAPPING_CLK, | |
1671 | clk_type); | |
1672 | if (clk_id < 0) | |
1673 | return clk_id; | |
1674 | ||
1675 | param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff)); | |
1676 | ||
1677 | ret = smu_cmn_send_smc_msg_with_param(smu, | |
1678 | SMU_MSG_GetDpmFreqByIndex, | |
1679 | param, | |
1680 | value); | |
1681 | if (ret) | |
1682 | return ret; | |
1683 | ||
1684 | /* | |
1685 | * BIT31: 0 - fine grained DPM, 1 - discrete DPM. | |
1686 | * Discrete DPM is not supported for now, so mask the flag off. | |
1687 | */ | |
1688 | *value = *value & 0x7fffffff; | |
1689 | ||
1690 | return ret; | |
1691 | } | |
1692 | ||
1693 | int smu_v13_0_get_dpm_level_count(struct smu_context *smu, | |
1694 | enum smu_clk_type clk_type, | |
1695 | uint32_t *value) | |
1696 | { | |
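| /* | |
| * A level index of 0xff asks the firmware for the number of | |
| * supported DPM levels rather than a specific level's frequency. | |
| */ | |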
1697 | return smu_v13_0_get_dpm_freq_by_index(smu, | |
1698 | clk_type, | |
1699 | 0xff, | |
1700 | value); | |
1701 | } | |
1702 | ||
1703 | int smu_v13_0_set_single_dpm_table(struct smu_context *smu, | |
1704 | enum smu_clk_type clk_type, | |
1705 | struct smu_13_0_dpm_table *single_dpm_table) | |
1706 | { | |
1707 | int ret = 0; | |
1708 | uint32_t clk; | |
1709 | int i; | |
1710 | ||
1711 | ret = smu_v13_0_get_dpm_level_count(smu, | |
1712 | clk_type, | |
1713 | &single_dpm_table->count); | |
1714 | if (ret) { | |
1715 | dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__); | |
1716 | return ret; | |
1717 | } | |
1718 | ||
1719 | for (i = 0; i < single_dpm_table->count; i++) { | |
1720 | ret = smu_v13_0_get_dpm_freq_by_index(smu, | |
1721 | clk_type, | |
1722 | i, | |
1723 | &clk); | |
1724 | if (ret) { | |
1725 | dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__); | |
1726 | return ret; | |
1727 | } | |
1728 | ||
1729 | single_dpm_table->dpm_levels[i].value = clk; | |
1730 | single_dpm_table->dpm_levels[i].enabled = true; | |
1731 | ||
1732 | if (i == 0) | |
1733 | single_dpm_table->min = clk; | |
1734 | if (i == single_dpm_table->count - 1) /* no "else": a single level sets both min and max */ | |
1735 | single_dpm_table->max = clk; | |
1736 | } | |
1737 | ||
1738 | return 0; | |
1739 | } | |
1740 | ||
1741 | int smu_v13_0_get_dpm_level_range(struct smu_context *smu, | |
1742 | enum smu_clk_type clk_type, | |
1743 | uint32_t *min_value, | |
1744 | uint32_t *max_value) | |
1745 | { | |
1746 | uint32_t level_count = 0; | |
1747 | int ret = 0; | |
1748 | ||
1749 | if (!min_value && !max_value) | |
1750 | return -EINVAL; | |
1751 | ||
1752 | if (min_value) { | |
1753 | /* use the level 0 clock value as the min value by default */ | |
1754 | ret = smu_v13_0_get_dpm_freq_by_index(smu, | |
1755 | clk_type, | |
1756 | 0, | |
1757 | min_value); | |
1758 | if (ret) | |
1759 | return ret; | |
1760 | } | |
1761 | ||
1762 | if (max_value) { | |
1763 | ret = smu_v13_0_get_dpm_level_count(smu, | |
1764 | clk_type, | |
1765 | &level_count); | |
1766 | if (ret) | |
1767 | return ret; | |
1768 | ||
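| /* the last level's clock value serves as the max value */ | |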
1769 | ret = smu_v13_0_get_dpm_freq_by_index(smu, | |
1770 | clk_type, | |
1771 | level_count - 1, | |
1772 | max_value); | |
1773 | if (ret) | |
1774 | return ret; | |
1775 | } | |
1776 | ||
1777 | return ret; | |
1778 | } | |
1779 | ||
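| /* | |
| * Read the encoded link width field from LC_LINK_WIDTH_CNTL; the | |
| * result is an index into link_width[], not a lane count. | |
| */ | |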
1780 | int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu) | |
1781 | { | |
1782 | struct amdgpu_device *adev = smu->adev; | |
1783 | ||
1784 | return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & | |
1785 | PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) | |
1786 | >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; | |
1787 | } | |
1788 | ||
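| /* Return the current PCIe link width as a lane count (x1..x16). */ | |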
1789 | int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu) | |
1790 | { | |
1791 | uint32_t width_level; | |
1792 | ||
1793 | width_level = smu_v13_0_get_current_pcie_link_width_level(smu); | |
1794 | if (width_level > LINK_WIDTH_MAX) | |
1795 | width_level = 0; | |
1796 | ||
1797 | return link_width[width_level]; | |
1798 | } | |
1799 | ||
1800 | int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu) | |
1801 | { | |
1802 | struct amdgpu_device *adev = smu->adev; | |
1803 | ||
1804 | return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & | |
1805 | PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) | |
1806 | >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; | |
1807 | } | |
1808 | ||
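| /* | |
| * Return the current PCIe link speed in units of 0.1 GT/s, | |
| * e.g. 160 for a 16.0 GT/s Gen4 link. | |
| */ | |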
1809 | int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu) | |
1810 | { | |
1811 | uint32_t speed_level; | |
1812 | ||
1813 | speed_level = smu_v13_0_get_current_pcie_link_speed_level(smu); | |
1814 | if (speed_level > LINK_SPEED_MAX) | |
1815 | speed_level = 0; | |
1816 | ||
1817 | return link_speed[speed_level]; | |
1818 | } | |
1819 | ||
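| /* | |
| * Fill the table with 0xFF so that any field not populated by the | |
| * ASIC-specific code later reads back as "unknown" rather than 0. | |
| */ | |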
1820 | void smu_v13_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics) | |
1821 | { | |
1822 | memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0)); | |
1823 | ||
1824 | gpu_metrics->common_header.structure_size = | |
1825 | sizeof(struct gpu_metrics_v1_0); | |
1826 | gpu_metrics->common_header.format_revision = 1; | |
1827 | gpu_metrics->common_header.content_revision = 0; | |
1828 | ||
1829 | gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); | |
1830 | } |