/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#define SMU_11_0_PARTIAL_PPTABLE

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_ras.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");

#define SMU11_VOLTAGE_SCALE 4

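/*
 * Driver <-> SMU mailbox used by the helpers below: the message index goes
 * into MP1_SMN_C2PMSG_66, its 32-bit argument into C2PMSG_82, and the
 * firmware reports completion status in C2PMSG_90 (0x1 means success).
 * C2PMSG_82 is also where the firmware returns a response argument.
 */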
static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
                                              uint16_t msg)
{
        struct amdgpu_device *adev = smu->adev;

        WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
        return 0;
}

static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
        struct amdgpu_device *adev = smu->adev;

        *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
        return 0;
}

static int smu_v11_0_wait_for_response(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t cur_value, i, timeout = adev->usec_timeout * 10;

        for (i = 0; i < timeout; i++) {
                cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
                if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
                        return cur_value == 0x1 ? 0 : -EIO;

                udelay(1);
        }

        /*
         * Timeout means something went wrong: the loop above returns as
         * soon as the firmware posts any response, so reaching this point
         * means the SMU never replied.
         */
        return -ETIME;
}

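/*
 * Send a message with a parameter to the SMU and wait for the response:
 * wait out any in-flight message, clear the status register, write the
 * argument and message index, then poll for completion. All of this is
 * serialized by smu->message_lock.
 */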
int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
                              enum smu_message_type msg,
                              uint32_t param,
                              uint32_t *read_arg)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0, index = 0;

        index = smu_msg_get_index(smu, msg);
        if (index < 0)
                return index;

        mutex_lock(&smu->message_lock);
        ret = smu_v11_0_wait_for_response(smu);
        if (ret) {
                pr_err("Msg issuing pre-check failed; SMU may not be in the right state!\n");
                goto out;
        }

        WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

        WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

        smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

        ret = smu_v11_0_wait_for_response(smu);
        if (ret) {
                pr_err("failed to send message: %10s (%d) \tparam: 0x%08x, response %#x\n",
                       smu_get_message_name(smu, msg), index, param, ret);
                goto out;
        }

        if (read_arg) {
                ret = smu_v11_0_read_arg(smu, read_arg);
                if (ret) {
                        pr_err("failed to read message arg: %10s (%d) \tparam: 0x%08x, response %#x\n",
                               smu_get_message_name(smu, msg), index, param, ret);
                        goto out;
                }
        }
out:
        mutex_unlock(&smu->message_lock);
        return ret;
}

int smu_v11_0_init_microcode(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        const char *chip_name;
        char fw_name[30];
        int err = 0;
        const struct smc_firmware_header_v1_0 *hdr;
        const struct common_firmware_header *header;
        struct amdgpu_firmware_info *ucode = NULL;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                chip_name = "vega20";
                break;
        case CHIP_ARCTURUS:
                chip_name = "arcturus";
                break;
        case CHIP_NAVI10:
                chip_name = "navi10";
                break;
        case CHIP_NAVI14:
                chip_name = "navi14";
                break;
        case CHIP_NAVI12:
                chip_name = "navi12";
                break;
        default:
                BUG();
        }

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

        err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->pm.fw);
        if (err)
                goto out;

        hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
        amdgpu_ucode_print_smc_hdr(&hdr->header);
        adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
                ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
                ucode->fw = adev->pm.fw;
                header = (const struct common_firmware_header *)ucode->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
        }

out:
        if (err) {
                DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
                          fw_name);
                release_firmware(adev->pm.fw);
                adev->pm.fw = NULL;
        }
        return err;
}

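/*
 * Load the SMC firmware image directly into MP1 SRAM (used when the PSP
 * does not handle the load): copy the ucode payload word by word, pulse
 * the MP1 reset bit, then poll the firmware flags until the
 * interrupts-enabled bit indicates the firmware is up.
 */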
int smu_v11_0_load_microcode(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        const uint32_t *src;
        const struct smc_firmware_header_v1_0 *hdr;
        uint32_t addr_start = MP1_SRAM;
        uint32_t i;
        uint32_t smc_fw_size;
        uint32_t mp1_fw_flags;

        hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
        src = (const uint32_t *)(adev->pm.fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
        smc_fw_size = le32_to_cpu(hdr->header.ucode_size_bytes);

        for (i = 1; i < smc_fw_size/4 - 1; i++) {
                WREG32_PCIE(addr_start, src[i]);
                addr_start += 4;
        }

        WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
                1 & MP1_SMN_PUB_CTRL__RESET_MASK);
        WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
                1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

        for (i = 0; i < adev->usec_timeout; i++) {
                mp1_fw_flags = RREG32_PCIE(MP1_Public |
                        (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
                if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
                        MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
                        break;
                udelay(1);
        }

        if (i == adev->usec_timeout)
                return -ETIME;

        return 0;
}

int smu_v11_0_check_fw_status(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t mp1_fw_flags;

        mp1_fw_flags = RREG32_PCIE(MP1_Public |
                                   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

        if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
            MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
                return 0;

        return -EIO;
}

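/*
 * The firmware version word packs major in bits 31:16, minor in bits 15:8
 * and a debug/patch level in bits 7:0; it is reported alongside the
 * driver-interface (if) version checked below.
 */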
int smu_v11_0_check_fw_version(struct smu_context *smu)
{
        uint32_t if_version = 0xff, smu_version = 0xff;
        uint16_t smu_major;
        uint8_t smu_minor, smu_debug;
        int ret = 0;

        ret = smu_get_smc_version(smu, &if_version, &smu_version);
        if (ret)
                return ret;

        smu_major = (smu_version >> 16) & 0xffff;
        smu_minor = (smu_version >> 8) & 0xff;
        smu_debug = (smu_version >> 0) & 0xff;

        switch (smu->adev->asic_type) {
        case CHIP_VEGA20:
                smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VG20;
                break;
        case CHIP_ARCTURUS:
                smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
                break;
        case CHIP_NAVI10:
                smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
                break;
        case CHIP_NAVI12:
                smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
                break;
        case CHIP_NAVI14:
                smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
                break;
        default:
                pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
                smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
                break;
        }

        /*
         * 1. if_version mismatch is not critical as our fw is designed
         * to be backward compatible.
         * 2. New fw usually brings some optimizations. But those are
         * visible only on the paired driver.
         * Considering the above, we just leave the user a warning message
         * instead of halting driver load.
         */
        if (if_version != smu->smc_driver_if_version) {
                pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
                        "smu fw version = 0x%08x (%d.%d.%d)\n",
                        smu->smc_driver_if_version, if_version,
                        smu_version, smu_major, smu_minor, smu_debug);
                pr_warn("SMU driver if version not matched\n");
        }

        return ret;
}

static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t ppt_offset_bytes;
        const struct smc_firmware_header_v2_0 *v2;

        v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

        ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
        *size = le32_to_cpu(v2->ppt_size_bytes);
        *table = (uint8_t *)v2 + ppt_offset_bytes;

        return 0;
}

static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
                                      uint32_t *size, uint32_t pptable_id)
{
        struct amdgpu_device *adev = smu->adev;
        const struct smc_firmware_header_v2_1 *v2_1;
        struct smc_soft_pptable_entry *entries;
        uint32_t pptable_count = 0;
        int i = 0;

        v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
        entries = (struct smc_soft_pptable_entry *)
                ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
        pptable_count = le32_to_cpu(v2_1->pptable_count);
        for (i = 0; i < pptable_count; i++) {
                if (le32_to_cpu(entries[i].id) == pptable_id) {
                        *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
                        *size = le32_to_cpu(entries[i].ppt_size_bytes);
                        break;
                }
        }

        if (i == pptable_count)
                return -EINVAL;

        return 0;
}

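/*
 * Pick the powerplay table source: SMC firmware headers v2.x can embed one
 * or more soft pptables (selected by pp_table_id); otherwise fall back to
 * the pptable provided by the vbios powerplayinfo data table.
 */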
int smu_v11_0_setup_pptable(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        const struct smc_firmware_header_v1_0 *hdr;
        int ret, index;
        uint32_t size = 0;
        uint16_t atom_table_size;
        uint8_t frev, crev;
        void *table;
        uint16_t version_major, version_minor;

        hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
        version_major = le16_to_cpu(hdr->header.header_version_major);
        version_minor = le16_to_cpu(hdr->header.header_version_minor);
        if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
                pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
                switch (version_minor) {
                case 0:
                        ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
                        break;
                case 1:
                        ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
                                                         smu->smu_table.boot_values.pp_table_id);
                        break;
                default:
                        ret = -EINVAL;
                        break;
                }
                if (ret)
                        return ret;

        } else {
                pr_info("use vbios provided pptable\n");
                index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                                    powerplayinfo);

                ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
                                              (uint8_t **)&table);
                if (ret)
                        return ret;
                size = atom_table_size;
        }

        if (!smu->smu_table.power_play_table)
                smu->smu_table.power_play_table = table;
        if (!smu->smu_table.power_play_table_size)
                smu->smu_table.power_play_table_size = size;

        return 0;
}

static int smu_v11_0_init_dpm_context(struct smu_context *smu)
{
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

        if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
                return -EINVAL;

        return smu_alloc_dpm_context(smu);
}

static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
{
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

        if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
                return -EINVAL;

        kfree(smu_dpm->dpm_context);
        kfree(smu_dpm->golden_dpm_context);
        kfree(smu_dpm->dpm_current_power_state);
        kfree(smu_dpm->dpm_request_power_state);
        smu_dpm->dpm_context = NULL;
        smu_dpm->golden_dpm_context = NULL;
        smu_dpm->dpm_context_size = 0;
        smu_dpm->dpm_current_power_state = NULL;
        smu_dpm->dpm_request_power_state = NULL;

        return 0;
}

int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = NULL;
        int ret = 0;

        if (smu_table->tables)
                return -EINVAL;

        tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
                         GFP_KERNEL);
        if (!tables)
                return -ENOMEM;

        smu_table->tables = tables;

        ret = smu_tables_init(smu, tables);
        if (ret)
                return ret;

        ret = smu_v11_0_init_dpm_context(smu);
        if (ret)
                return ret;

        return 0;
}

int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        int ret = 0;

        if (!smu_table->tables)
                return -EINVAL;

        kfree(smu_table->tables);
        kfree(smu_table->metrics_table);
        kfree(smu_table->watermarks_table);
        smu_table->tables = NULL;
        smu_table->metrics_table = NULL;
        smu_table->watermarks_table = NULL;
        smu_table->metrics_time = 0;

        ret = smu_v11_0_fini_dpm_context(smu);
        if (ret)
                return ret;
        return 0;
}

int smu_v11_0_init_power(struct smu_context *smu)
{
        struct smu_power_context *smu_power = &smu->smu_power;

        if (smu_power->power_context || smu_power->power_context_size != 0)
                return -EINVAL;

        smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
                                           GFP_KERNEL);
        if (!smu_power->power_context)
                return -ENOMEM;
        smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);

        return 0;
}

int smu_v11_0_fini_power(struct smu_context *smu)
{
        struct smu_power_context *smu_power = &smu->smu_power;

        if (!smu_power->power_context || smu_power->power_context_size == 0)
                return -EINVAL;

        kfree(smu_power->power_context);
        smu_power->power_context = NULL;
        smu_power->power_context_size = 0;

        return 0;
}

int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
        int ret, index;
        uint16_t size;
        uint8_t frev, crev;
        struct atom_common_table_header *header;
        struct atom_firmware_info_v3_3 *v_3_3;
        struct atom_firmware_info_v3_1 *v_3_1;

        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                            firmwareinfo);

        ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
                                      (uint8_t **)&header);
        if (ret)
                return ret;

        if (header->format_revision != 3) {
                pr_err("unknown atom_firmware_info version for smu11!\n");
                return -EINVAL;
        }

        switch (header->content_revision) {
        case 0:
        case 1:
        case 2:
                v_3_1 = (struct atom_firmware_info_v3_1 *)header;
                smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
                smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
                smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
                smu->smu_table.boot_values.socclk = 0;
                smu->smu_table.boot_values.dcefclk = 0;
                smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
                smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
                smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
                smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
                smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
                smu->smu_table.boot_values.pp_table_id = 0;
                break;
        case 3:
        default:
                v_3_3 = (struct atom_firmware_info_v3_3 *)header;
                smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
                smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
                smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
                smu->smu_table.boot_values.socclk = 0;
                smu->smu_table.boot_values.dcefclk = 0;
                smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
                smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
                smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
                smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
                smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
                smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
        }

        smu->smu_table.boot_values.format_revision = header->format_revision;
        smu->smu_table.boot_values.content_revision = header->content_revision;

        return 0;
}

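/*
 * Query bootup clocks from the vbios by executing the getsmuclockinfo
 * command table once per clock. The atom interface returns frequencies in
 * Hz; they are stored in 10 kHz units (hence the divide by 10000).
 */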
int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
        int ret, index;
        struct amdgpu_device *adev = smu->adev;
        struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
        struct atom_get_smu_clock_info_output_parameters_v3_1 *output;

        input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
        input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
        index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
                                            getsmuclockinfo);

        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
                                        (uint32_t *)&input);
        if (ret)
                return -EINVAL;

        output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
        smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

        memset(&input, 0, sizeof(input));
        input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
        input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
        index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
                                            getsmuclockinfo);

        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
                                        (uint32_t *)&input);
        if (ret)
                return -EINVAL;

        output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
        smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

        memset(&input, 0, sizeof(input));
        input.clk_id = SMU11_SYSPLL0_ECLK_ID;
        input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
        index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
                                            getsmuclockinfo);

        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
                                        (uint32_t *)&input);
        if (ret)
                return -EINVAL;

        output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
        smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

        memset(&input, 0, sizeof(input));
        input.clk_id = SMU11_SYSPLL0_VCLK_ID;
        input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
        index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
                                            getsmuclockinfo);

        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
                                        (uint32_t *)&input);
        if (ret)
                return -EINVAL;

        output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
        smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

        memset(&input, 0, sizeof(input));
        input.clk_id = SMU11_SYSPLL0_DCLK_ID;
        input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
        index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
                                            getsmuclockinfo);

        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
                                        (uint32_t *)&input);
        if (ret)
                return -EINVAL;

        output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
        smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

        if ((smu->smu_table.boot_values.format_revision == 3) &&
            (smu->smu_table.boot_values.content_revision >= 2)) {
                memset(&input, 0, sizeof(input));
                input.clk_id = SMU11_SYSPLL1_0_FCLK_ID;
                input.syspll_id = SMU11_SYSPLL1_2_ID;
                input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
                index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
                                                    getsmuclockinfo);

                ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
                                                (uint32_t *)&input);
                if (ret)
                        return -EINVAL;

                output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
                smu->smu_table.boot_values.fclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
        }

        return 0;
}

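/*
 * Hand the SMU both views of the driver-allocated memory pool: the CPU
 * virtual address (for the system DRAM path) and the MC address plus size
 * (for DRAM logging), each split into high/low 32-bit halves.
 */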
int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        int ret = 0;
        uint64_t address;
        uint32_t address_low, address_high;

        if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
                return ret;

        address = (uintptr_t)memory_pool->cpu_addr;
        address_high = (uint32_t)upper_32_bits(address);
        address_low  = (uint32_t)lower_32_bits(address);

        ret = smu_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetSystemVirtualDramAddrHigh,
                                          address_high,
                                          NULL);
        if (ret)
                return ret;
        ret = smu_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetSystemVirtualDramAddrLow,
                                          address_low,
                                          NULL);
        if (ret)
                return ret;

        address = memory_pool->mc_address;
        address_high = (uint32_t)upper_32_bits(address);
        address_low  = (uint32_t)lower_32_bits(address);

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
                                          address_high, NULL);
        if (ret)
                return ret;
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
                                          address_low, NULL);
        if (ret)
                return ret;
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
                                          (uint32_t)memory_pool->size, NULL);
        if (ret)
                return ret;

        return ret;
}

int smu_v11_0_check_pptable(struct smu_context *smu)
{
        int ret;

        ret = smu_check_powerplay_table(smu);
        return ret;
}

int smu_v11_0_parse_pptable(struct smu_context *smu)
{
        int ret;

        struct smu_table_context *table_context = &smu->smu_table;
        struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];

        /* during TDR we need to free and re-allocate the pptable */
        kfree(table_context->driver_pptable);

        table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);

        if (!table_context->driver_pptable)
                return -ENOMEM;

        ret = smu_store_powerplay_table(smu);
        if (ret)
                return -EINVAL;

        ret = smu_append_powerplay_table(smu);

        return ret;
}

int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
        int ret;

        ret = smu_set_default_dpm_table(smu);

        return ret;
}

int smu_v11_0_write_pptable(struct smu_context *smu)
{
        struct smu_table_context *table_context = &smu->smu_table;
        int ret = 0;

        ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
                               table_context->driver_pptable, true);

        return ret;
}

int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
        int ret;

        if (amdgpu_sriov_vf(smu->adev))
                return 0;

        ret = smu_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
        if (ret)
                pr_err("SMU11 attempt to set divider for DCEFCLK failed!");

        return ret;
}

int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
        struct smu_table_context *table_context = &smu->smu_table;

        if (!table_context)
                return -EINVAL;

        return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}

int smu_v11_0_set_driver_table_location(struct smu_context *smu)
{
        struct smu_table *driver_table = &smu->smu_table.driver_table;
        int ret = 0;

        if (driver_table->mc_address) {
                ret = smu_send_smc_msg_with_param(smu,
                                SMU_MSG_SetDriverDramAddrHigh,
                                upper_32_bits(driver_table->mc_address),
                                NULL);
                if (!ret)
                        ret = smu_send_smc_msg_with_param(smu,
                                SMU_MSG_SetDriverDramAddrLow,
                                lower_32_bits(driver_table->mc_address),
                                NULL);
        }

        return ret;
}

int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
        int ret = 0;
        struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

        if (amdgpu_sriov_vf(smu->adev))
                return 0;

        if (tool_table->mc_address) {
                ret = smu_send_smc_msg_with_param(smu,
                                SMU_MSG_SetToolsDramAddrHigh,
                                upper_32_bits(tool_table->mc_address),
                                NULL);
                if (!ret)
                        ret = smu_send_smc_msg_with_param(smu,
                                SMU_MSG_SetToolsDramAddrLow,
                                lower_32_bits(tool_table->mc_address),
                                NULL);
        }

        return ret;
}

int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
        int ret = 0;

        if (amdgpu_sriov_vf(smu->adev))
                return 0;

        if (!smu->pm_enabled)
                return ret;

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
        return ret;
}

int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
        uint32_t feature_mask[2];

        if (amdgpu_sriov_vf(smu->adev))
                return 0;

        mutex_lock(&feature->mutex);
        if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
                goto out;

        bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
                                          feature_mask[1], NULL);
        if (ret)
                goto out;

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
                                          feature_mask[0], NULL);

out:
        mutex_unlock(&feature->mutex);
        return ret;
}

int smu_v11_0_get_enabled_mask(struct smu_context *smu,
                               uint32_t *feature_mask, uint32_t num)
{
        uint32_t feature_mask_high = 0, feature_mask_low = 0;
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;

        if (amdgpu_sriov_vf(smu->adev) && !amdgpu_sriov_is_pp_one_vf(smu->adev))
                return 0;

        if (!feature_mask || num < 2)
                return -EINVAL;

        if (bitmap_empty(feature->enabled, feature->feature_num)) {
                ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
                if (ret)
                        return ret;

                ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
                if (ret)
                        return ret;

                feature_mask[0] = feature_mask_low;
                feature_mask[1] = feature_mask_high;
        } else {
                bitmap_copy((unsigned long *)feature_mask, feature->enabled,
                            feature->feature_num);
        }

        return ret;
}

int smu_v11_0_system_features_control(struct smu_context *smu,
                                      bool en)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint32_t feature_mask[2];
        int ret = 0;

        ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
                                     SMU_MSG_DisableAllSmuFeatures), NULL);
        if (ret)
                return ret;

        bitmap_zero(feature->enabled, feature->feature_num);
        bitmap_zero(feature->supported, feature->feature_num);

        if (en) {
                ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
                if (ret)
                        return ret;

                bitmap_copy(feature->enabled, (unsigned long *)feature_mask,
                            feature->feature_num);
                bitmap_copy(feature->supported, (unsigned long *)feature_mask,
                            feature->feature_num);
        }

        return ret;
}

int smu_v11_0_notify_display_change(struct smu_context *smu)
{
        int ret = 0;

        if (amdgpu_sriov_vf(smu->adev))
                return 0;

        if (!smu->pm_enabled)
                return ret;

        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
            smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);

        return ret;
}

static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
                                    enum smu_clk_type clock_select)
{
        int ret = 0;
        int clk_id;

        if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
            (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
                return 0;

        clk_id = smu_clk_get_index(smu, clock_select);
        if (clk_id < 0)
                return -EINVAL;

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
                                          clk_id << 16, clock);
        if (ret) {
                pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
                return ret;
        }

        if (*clock != 0)
                return 0;

        /* if DC limit is zero, return AC limit */
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
                                          clk_id << 16, clock);
        if (ret) {
                pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
                return ret;
        }

        return 0;
}

int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
        struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
        int ret = 0;

        if (!smu->smu_table.max_sustainable_clocks) {
                max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
                                                 GFP_KERNEL);
                if (!max_sustainable_clocks)
                        return -ENOMEM;
        } else {
                max_sustainable_clocks = smu->smu_table.max_sustainable_clocks;
        }

        smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;

        max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
        max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
        max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
        max_sustainable_clocks->display_clock = 0xFFFFFFFF;
        max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
        max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->uclock),
                                                          SMU_UCLK);
                if (ret) {
                        pr_err("[%s] failed to get max UCLK from SMC!",
                               __func__);
                        return ret;
                }
        }

        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->soc_clock),
                                                          SMU_SOCCLK);
                if (ret) {
                        pr_err("[%s] failed to get max SOCCLK from SMC!",
                               __func__);
                        return ret;
                }
        }

        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->dcef_clock),
                                                          SMU_DCEFCLK);
                if (ret) {
                        pr_err("[%s] failed to get max DCEFCLK from SMC!",
                               __func__);
                        return ret;
                }

                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->display_clock),
                                                          SMU_DISPCLK);
                if (ret) {
                        pr_err("[%s] failed to get max DISPCLK from SMC!",
                               __func__);
                        return ret;
                }
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->phy_clock),
                                                          SMU_PHYCLK);
                if (ret) {
                        pr_err("[%s] failed to get max PHYCLK from SMC!",
                               __func__);
                        return ret;
                }
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->pixel_clock),
                                                          SMU_PIXCLK);
                if (ret) {
                        pr_err("[%s] failed to get max PIXCLK from SMC!",
                               __func__);
                        return ret;
                }
        }

        if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
                max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

        return 0;
}

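/*
 * The maximum power limit comes from the pptable (or, failing that, the
 * first power limit read from the SMU). With overdrive enabled, it is
 * scaled up by the ODSETTING_POWERPERCENTAGE headroom percentage.
 */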
uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu)
{
        uint32_t od_limit, max_power_limit;
        struct smu_11_0_powerplay_table *powerplay_table = NULL;
        struct smu_table_context *table_context = &smu->smu_table;

        powerplay_table = table_context->power_play_table;

        max_power_limit = smu_get_pptable_power_limit(smu);

        if (!max_power_limit) {
                /* if we couldn't get the table limit, fall back on first-read value */
                if (!smu->default_power_limit)
                        smu->default_power_limit = smu->power_limit;
                max_power_limit = smu->default_power_limit;
        }

        if (smu->od_enabled) {
                od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);

                pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);

                max_power_limit *= (100 + od_limit);
                max_power_limit /= 100;
        }

        return max_power_limit;
}

int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
        int ret = 0;
        uint32_t max_power_limit;

        if (amdgpu_sriov_vf(smu->adev))
                return 0;

        max_power_limit = smu_v11_0_get_max_power_limit(smu);

        if (n > max_power_limit) {
                pr_err("New power limit (%d) is over the max allowed %d\n",
                       n, max_power_limit);
                return -EINVAL;
        }

        if (n == 0)
                n = smu->default_power_limit;

        if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
                pr_err("Setting new power limit is not supported!\n");
                return -EOPNOTSUPP;
        }

        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
        if (ret) {
                pr_err("[%s] Set power limit failed!\n", __func__);
                return ret;
        }
        smu->power_limit = n;

        return 0;
}

int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
                                   enum smu_clk_type clk_id,
                                   uint32_t *value)
{
        int ret = 0;
        uint32_t freq = 0;
        int asic_clk_id;

        if (clk_id >= SMU_CLK_COUNT || !value)
                return -EINVAL;

        asic_clk_id = smu_clk_get_index(smu, clk_id);
        if (asic_clk_id < 0)
                return -EINVAL;

        /* if the ASIC has no GetDpmClockFreq message, read the current
         * clock from the SmuMetrics_t table instead
         */
        if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0) {
                ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
        } else {
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
                                                  (asic_clk_id << 16), &freq);
                if (ret)
                        return ret;
        }

        freq *= 100;
        *value = freq;

        return ret;
}

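/*
 * Program the THM thermal interrupt thresholds (in degrees Celsius),
 * clamping the requested range to the driver alert limits and the
 * pptable's software shutdown temperature.
 */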
static int smu_v11_0_set_thermal_range(struct smu_context *smu,
                                       struct smu_temperature_range range)
{
        struct amdgpu_device *adev = smu->adev;
        int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
        int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
        uint32_t val;
        struct smu_table_context *table_context = &smu->smu_table;
        struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;

        low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
                  range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
        high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, powerplay_table->software_shutdown_temp);

        if (low > high)
                return -EINVAL;

        val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
        val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);

        WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

        return 0;
}

static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t val = 0;

        val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
        val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
        val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);

        WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

        return 0;
}

int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
        int ret = 0;
        struct smu_temperature_range range;
        struct amdgpu_device *adev = smu->adev;

        memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));

        ret = smu_get_thermal_temperature_range(smu, &range);
        if (ret)
                return ret;

        if (smu->smu_table.thermal_controller_type) {
                ret = smu_v11_0_set_thermal_range(smu, range);
                if (ret)
                        return ret;

                ret = smu_v11_0_enable_thermal_alert(smu);
                if (ret)
                        return ret;

                ret = smu_set_thermal_fan_table(smu);
                if (ret)
                        return ret;
        }

        adev->pm.dpm.thermal.min_temp = range.min;
        adev->pm.dpm.thermal.max_temp = range.max;
        adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
        adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
        adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
        adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
        adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
        adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
        adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

        return ret;
}

int smu_v11_0_stop_thermal_control(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;

        WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);

        return 0;
}

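/*
 * SVI2 VID decoding: VDDC = 1.55 V - VID * 6.25 mV. The math below works
 * in 0.25 mV units (6200 = 1.55 V, 25 = 6.25 mV) and divides by
 * SMU11_VOLTAGE_SCALE to return millivolts.
 */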
static uint16_t convert_to_vddc(uint8_t vid)
{
        return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}

static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t vdd = 0, val_vid = 0;

        if (!value)
                return -EINVAL;
        val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
                SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
                SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

        vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

        *value = vdd;

        return 0;
}

int smu_v11_0_read_sensor(struct smu_context *smu,
                          enum amd_pp_sensors sensor,
                          void *data, uint32_t *size)
{
        int ret = 0;

        if (!data || !size)
                return -EINVAL;

        switch (sensor) {
        case AMDGPU_PP_SENSOR_GFX_MCLK:
                ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_SCLK:
                ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VDDGFX:
                ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
                *(uint32_t *)data = 0;
                *size = 4;
                break;
        default:
                ret = smu_common_read_sensor(smu, sensor, data, size);
                break;
        }

        if (ret)
                *size = 0;

        return ret;
}

int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
                                        struct pp_display_clock_request
                                        *clock_req)
{
        enum amd_pp_clock_type clk_type = clock_req->clock_type;
        int ret = 0;
        enum smu_clk_type clk_select = 0;
        uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
            smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                switch (clk_type) {
                case amd_pp_dcef_clock:
                        clk_select = SMU_DCEFCLK;
                        break;
                case amd_pp_disp_clock:
                        clk_select = SMU_DISPCLK;
                        break;
                case amd_pp_pixel_clock:
                        clk_select = SMU_PIXCLK;
                        break;
                case amd_pp_phy_clock:
                        clk_select = SMU_PHYCLK;
                        break;
                case amd_pp_mem_clock:
                        clk_select = SMU_UCLK;
                        break;
                default:
                        pr_info("[%s] Invalid Clock Type!", __func__);
                        ret = -EINVAL;
                        break;
                }

                if (ret)
                        goto failed;

                if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
                        return 0;

                ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);

                if (clk_select == SMU_UCLK)
                        smu->hard_min_uclk_req_from_dal = clk_freq;
        }

failed:
        return ret;
}

int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
        int ret = 0;
        struct amdgpu_device *adev = smu->adev;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                break;
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
                        return 0;
                if (enable)
                        ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
                else
                        ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
                break;
        default:
                break;
        }

        return ret;
}

uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
        if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
                return AMD_FAN_CTRL_MANUAL;
        else
                return AMD_FAN_CTRL_AUTO;
}

static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
        int ret = 0;

        if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
                return 0;

        ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
        if (ret)
                pr_err("[%s]%s smc FAN CONTROL feature failed!",
                       __func__, (auto_fan_control ? "Start" : "Stop"));

        return ret;
}

static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
        struct amdgpu_device *adev = smu->adev;

        WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
                     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
                                   CG_FDO_CTRL2, TMIN, 0));
        WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
                     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
                                   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

        return 0;
}

1446 int
1447 smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
1448 {
1449         struct amdgpu_device *adev = smu->adev;
1450         uint32_t duty100, duty;
1451         uint64_t tmp64;
1452
1453         if (speed > 100)
1454                 speed = 100;
1455
1456         if (smu_v11_0_auto_fan_control(smu, 0))
1457                 return -EINVAL;
1458
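        /* FMAX_DUTY100 is the duty value that corresponds to 100% fan speed */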
1459         duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
1460                                 CG_FDO_CTRL1, FMAX_DUTY100);
1461         if (!duty100)
1462                 return -EINVAL;
1463
1464         tmp64 = (uint64_t)speed * duty100;
1465         do_div(tmp64, 100);
1466         duty = (uint32_t)tmp64;
1467
1468         WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
1469                      REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
1470                                    CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
1471
1472         return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
1473 }
1474
1475 int
1476 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
1477                                uint32_t mode)
1478 {
1479         int ret = 0;
1480
1481         switch (mode) {
1482         case AMD_FAN_CTRL_NONE:
1483                 ret = smu_v11_0_set_fan_speed_percent(smu, 100);
1484                 break;
1485         case AMD_FAN_CTRL_MANUAL:
1486                 ret = smu_v11_0_auto_fan_control(smu, 0);
1487                 break;
1488         case AMD_FAN_CTRL_AUTO:
1489                 ret = smu_v11_0_auto_fan_control(smu, 1);
1490                 break;
1491         default:
1492                 break;
1493         }
1494
1495         if (ret) {
                pr_err("[%s] Set fan control mode failed!\n", __func__);
1497                 return -EINVAL;
1498         }
1499
1500         return ret;
1501 }
1502
1503 int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
1504                                        uint32_t speed)
1505 {
1506         struct amdgpu_device *adev = smu->adev;
1507         int ret;
1508         uint32_t tach_period, crystal_clock_freq;
1509
1510         if (!speed)
1511                 return -EINVAL;
1512
1513         ret = smu_v11_0_auto_fan_control(smu, 0);
1514         if (ret)
1515                 return ret;
1516
1517         crystal_clock_freq = amdgpu_asic_get_xclk(adev);
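        /*
         * amdgpu_asic_get_xclk() reports the reference clock in 10 kHz
         * units, so * 10000 converts it to Hz; 60 * Hz / RPM then gives
         * crystal cycles per fan revolution. The divide by 8 is assumed
         * to match the tachometer pulse setup programmed into the THM block.
         */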
1518         tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
1519         WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
1520                      REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
1521                                    CG_TACH_CTRL, TARGET_PERIOD,
1522                                    tach_period));
1523
1524         ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
1525
1526         return ret;
1527 }
1528
int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
                              uint32_t pstate)
{
        return smu_send_smc_msg_with_param(smu,
                                           SMU_MSG_SetXgmiMode,
                                           pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
                                           NULL);
}
1539
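/* re-arm the AC/DC transition interrupt after it has been serviced */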
1540 static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
1541 {
1542         return smu_send_smc_msg(smu,
1543                                 SMU_MSG_ReenableAcDcInterrupt,
1544                                 NULL);
1545 }
1546
1547 #define THM_11_0__SRCID__THM_DIG_THERM_L2H              0               /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
1548 #define THM_11_0__SRCID__THM_DIG_THERM_H2L              1               /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */
1549
1550 static int smu_v11_0_irq_process(struct amdgpu_device *adev,
1551                                  struct amdgpu_irq_src *source,
1552                                  struct amdgpu_iv_entry *entry)
1553 {
1554         uint32_t client_id = entry->client_id;
1555         uint32_t src_id = entry->src_id;
1556
        if (client_id == SOC15_IH_CLIENTID_THM) {
                switch (src_id) {
                case THM_11_0__SRCID__THM_DIG_THERM_L2H:
                        pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
                                adev->pdev->bus->number,
                                PCI_SLOT(adev->pdev->devfn),
                                PCI_FUNC(adev->pdev->devfn));
                        break;
                case THM_11_0__SRCID__THM_DIG_THERM_H2L:
                        pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
                                adev->pdev->bus->number,
                                PCI_SLOT(adev->pdev->devfn),
                                PCI_FUNC(adev->pdev->devfn));
                        break;
                default:
                        pr_warn("GPU thermal interrupt with unknown src id (%d) detected on PCIe %d:%d.%d!\n",
                                src_id,
                                adev->pdev->bus->number,
                                PCI_SLOT(adev->pdev->devfn),
                                PCI_FUNC(adev->pdev->devfn));
                        break;
                }
1580         } else if (client_id == SOC15_IH_CLIENTID_MP1) {
1581                 if (src_id == 0xfe)
1582                         smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
1583         }
1584
1585         return 0;
1586 }
1587
1588 static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
1589 {
1590         .process = smu_v11_0_irq_process,
1591 };
1592
1593 int smu_v11_0_register_irq_handler(struct smu_context *smu)
1594 {
1595         struct amdgpu_device *adev = smu->adev;
1596         struct amdgpu_irq_src *irq_src = smu->irq_source;
1597         int ret = 0;
1598
        /* already registered */
1600         if (irq_src)
1601                 return 0;
1602
1603         irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
1604         if (!irq_src)
1605                 return -ENOMEM;
1606         smu->irq_source = irq_src;
1607
1608         irq_src->funcs = &smu_v11_0_irq_funcs;
1609
1610         ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
1611                                 THM_11_0__SRCID__THM_DIG_THERM_L2H,
1612                                 irq_src);
1613         if (ret)
1614                 return ret;
1615
1616         ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
1617                                 THM_11_0__SRCID__THM_DIG_THERM_H2L,
1618                                 irq_src);
1619         if (ret)
1620                 return ret;
1621
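        /* src id 0xfe on the MP1 client carries the AC/DC change notification */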
        return amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
                                 0xfe,
                                 irq_src);
1629 }
1630
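/*
 * Copy the cached max sustainable clocks into the DC-facing table,
 * converting MHz to kHz; clocks DC derives elsewhere are reported as 0.
 */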
1631 int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
1632                 struct pp_smu_nv_clock_table *max_clocks)
1633 {
1634         struct smu_table_context *table_context = &smu->smu_table;
1635         struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;
1636
1637         if (!max_clocks || !table_context->max_sustainable_clocks)
1638                 return -EINVAL;
1639
1640         sustainable_clocks = table_context->max_sustainable_clocks;
1641
1642         max_clocks->dcfClockInKhz =
1643                         (unsigned int) sustainable_clocks->dcef_clock * 1000;
1644         max_clocks->displayClockInKhz =
1645                         (unsigned int) sustainable_clocks->display_clock * 1000;
1646         max_clocks->phyClockInKhz =
1647                         (unsigned int) sustainable_clocks->phy_clock * 1000;
1648         max_clocks->pixelClockInKhz =
1649                         (unsigned int) sustainable_clocks->pixel_clock * 1000;
1650         max_clocks->uClockInKhz =
1651                         (unsigned int) sustainable_clocks->uclock * 1000;
1652         max_clocks->socClockInKhz =
1653                         (unsigned int) sustainable_clocks->soc_clock * 1000;
1654         max_clocks->dscClockInKhz = 0;
1655         max_clocks->dppClockInKhz = 0;
1656         max_clocks->fabricClockInKhz = 0;
1657
1658         return 0;
1659 }
1660
int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
        return smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
}
1669
1670 static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
1671 {
1672         return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
1673 }
1674
1675 bool smu_v11_0_baco_is_support(struct smu_context *smu)
1676 {
1677         struct smu_baco_context *smu_baco = &smu->smu_baco;
1678         bool baco_support;
1679
1680         mutex_lock(&smu_baco->mutex);
1681         baco_support = smu_baco->platform_support;
1682         mutex_unlock(&smu_baco->mutex);
1683
1684         if (!baco_support)
1685                 return false;
1686
        /*
         * BACO is usable only when the BACO feature bit is both supported
         * and enabled. Arcturus does not expose this feature bit, so the
         * check is effectively skipped there.
         */
        if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
            !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
                return false;
1691
1692         return true;
1693 }
1694
1695 enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
1696 {
1697         struct smu_baco_context *smu_baco = &smu->smu_baco;
1698         enum smu_baco_state baco_state;
1699
1700         mutex_lock(&smu_baco->mutex);
1701         baco_state = smu_baco->state;
1702         mutex_unlock(&smu_baco->mutex);
1703
1704         return baco_state;
1705 }
1706
1707 int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
1708 {
1709         struct smu_baco_context *smu_baco = &smu->smu_baco;
1710         struct amdgpu_device *adev = smu->adev;
1711         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1712         uint32_t data;
1713         int ret = 0;
1714
1715         if (smu_v11_0_baco_get_state(smu) == state)
1716                 return 0;
1717
1718         mutex_lock(&smu_baco->mutex);
1719
1720         if (state == SMU_BACO_STATE_ENTER) {
1721                 if (!ras || !ras->supported) {
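                        /*
                         * Non-RAS path: set bit 31 of THM_BACO_CNTL, then
                         * send EnterBaco with arg 0; arg 1 below selects the
                         * RAS-aware entry sequence instead.
                         */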
1722                         data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
1723                         data |= 0x80000000;
1724                         WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
1725
1726                         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
1727                 } else {
1728                         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
1729                 }
1730         } else {
1731                 ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
1732                 if (ret)
1733                         goto out;
1734
1735                 if (ras && ras->supported) {
1736                         ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
1737                         if (ret)
1738                                 goto out;
1739                 }
1740
1741                 /* clear vbios scratch 6 and 7 for coming asic reinit */
1742                 WREG32(adev->bios_scratch_reg_offset + 6, 0);
1743                 WREG32(adev->bios_scratch_reg_offset + 7, 0);
1744         }
1745         if (ret)
1746                 goto out;
1747
1748         smu_baco->state = state;
1749 out:
1750         mutex_unlock(&smu_baco->mutex);
1751         return ret;
1752 }
1753
1754 int smu_v11_0_baco_enter(struct smu_context *smu)
1755 {
1756         struct amdgpu_device *adev = smu->adev;
1757         int ret = 0;
1758
1759         /* Arcturus does not need this audio workaround */
1760         if (adev->asic_type != CHIP_ARCTURUS) {
1761                 ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
1762                 if (ret)
1763                         return ret;
1764         }
1765
1766         ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
1767         if (ret)
1768                 return ret;
1769
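        /* the short delay presumably lets the BACO entry sequence settle */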
1770         msleep(10);
1771
1772         return ret;
1773 }
1774
int smu_v11_0_baco_exit(struct smu_context *smu)
{
        return smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
}
1785
1786 int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
1787                                                  uint32_t *min, uint32_t *max)
1788 {
1789         int ret = 0, clk_id = 0;
1790         uint32_t param = 0;
1791
1792         clk_id = smu_clk_get_index(smu, clk_type);
1793         if (clk_id < 0) {
1794                 ret = -EINVAL;
1795                 goto failed;
1796         }
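        /* the clock ID is carried in the upper 16 bits of the message arg */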
1797         param = (clk_id & 0xffff) << 16;
1798
1799         if (max) {
1800                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
1801                 if (ret)
1802                         goto failed;
1803         }
1804
1805         if (min) {
1806                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
1807                 if (ret)
1808                         goto failed;
1809         }
1810
1811 failed:
1812         return ret;
1813 }
1814
1815 int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
1816                             uint32_t min, uint32_t max)
1817 {
1818         int ret = 0, clk_id = 0;
1819         uint32_t param;
1820
1821         clk_id = smu_clk_get_index(smu, clk_type);
1822         if (clk_id < 0)
1823                 return clk_id;
1824
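        /* each message arg packs the clock ID in the upper 16 bits and the
         * target frequency in the lower 16 bits */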
1825         if (max > 0) {
1826                 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
1827                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
1828                                                   param, NULL);
1829                 if (ret)
1830                         return ret;
1831         }
1832
1833         if (min > 0) {
1834                 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
1835                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
1836                                                   param, NULL);
1837                 if (ret)
1838                         return ret;
1839         }
1840
1841         return ret;
1842 }
1843
1844 int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
1845 {
1846         struct amdgpu_device *adev = smu->adev;
1847         uint32_t pcie_gen = 0, pcie_width = 0;
1848         int ret;
1849
1850         if (amdgpu_sriov_vf(smu->adev))
1851                 return 0;
1852
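        /* pick the highest PCIe gen and widest link the platform advertises */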
1853         if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
1854                 pcie_gen = 3;
1855         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1856                 pcie_gen = 2;
1857         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1858                 pcie_gen = 1;
1859         else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
1860                 pcie_gen = 0;
1861
        /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
         * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
         * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
         */
1866         if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1867                 pcie_width = 6;
1868         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1869                 pcie_width = 5;
1870         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1871                 pcie_width = 4;
1872         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1873                 pcie_width = 3;
1874         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1875                 pcie_width = 2;
1876         else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1877                 pcie_width = 1;
1878
1879         ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1880
1881         if (ret)
1882                 pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
1883
        return ret;
}
1887
1888 int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
1889 {
1890         struct smu_table_context *table_context = &smu->smu_table;
1891         int ret = 0;
1892
        if (initialize) {
                if (table_context->overdrive_table)
                        return -EINVAL;

                table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
                if (!table_context->overdrive_table)
                        return -ENOMEM;

                ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
                if (ret) {
                        pr_err("Failed to export overdrive table!\n");
                        return ret;
                }
                if (!table_context->boot_overdrive_table) {
                        table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table,
                                                                      overdrive_table_size, GFP_KERNEL);
                        if (!table_context->boot_overdrive_table)
                                return -ENOMEM;
                }
        }
1913         ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
1914         if (ret) {
1915                 pr_err("Failed to import overdrive table!\n");
1916                 return ret;
1917         }
1918         return ret;
1919 }
1920
1921 int smu_v11_0_set_performance_level(struct smu_context *smu,
1922                                     enum amd_dpm_forced_level level)
1923 {
1924         int ret = 0;
1925         uint32_t sclk_mask, mclk_mask, soc_mask;
1926
1927         switch (level) {
1928         case AMD_DPM_FORCED_LEVEL_HIGH:
1929                 ret = smu_force_dpm_limit_value(smu, true);
1930                 break;
1931         case AMD_DPM_FORCED_LEVEL_LOW:
1932                 ret = smu_force_dpm_limit_value(smu, false);
1933                 break;
1934         case AMD_DPM_FORCED_LEVEL_AUTO:
1935         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1936                 ret = smu_unforce_dpm_levels(smu);
1937                 break;
1938         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1939         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1940         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1941                 ret = smu_get_profiling_clk_mask(smu, level,
1942                                                  &sclk_mask,
1943                                                  &mclk_mask,
1944                                                  &soc_mask);
1945                 if (ret)
1946                         return ret;
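                /* force each clock domain to the single level selected above */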
1947                 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
1948                 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
1949                 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
1950                 break;
1951         case AMD_DPM_FORCED_LEVEL_MANUAL:
1952         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1953         default:
1954                 break;
1955         }
1956         return ret;
1957 }
1958
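/* Tell the SMC whether the platform is currently on AC or DC (battery) power. */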
1959 int smu_v11_0_set_power_source(struct smu_context *smu,
1960                                enum smu_power_src_type power_src)
1961 {
1962         int pwr_source;
1963
1964         pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
1965         if (pwr_source < 0)
1966                 return -EINVAL;
1967
1968         return smu_send_smc_msg_with_param(smu,
1969                                         SMU_MSG_NotifyPowerSource,
1970                                         pwr_source,
1971                                         NULL);
1972 }
1973