Merge tag 'printk-for-5.19-fixup' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-block.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_atomfirmware.c
CommitLineData
a5bde2f9
AD
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
fdf2f6c5 23
a5bde2f9
AD
24#include <drm/amdgpu_drm.h>
25#include "amdgpu.h"
26#include "atomfirmware.h"
27#include "amdgpu_atomfirmware.h"
28#include "atom.h"
692bb1ac 29#include "atombios.h"
efe4f000 30#include "soc15_hw_ip.h"
a5bde2f9 31
5968c6a2
HZ
/*
 * Overlay of all firmware_info table revisions this driver understands.
 * Callers in this file read the v31 view for any 3.x table (revision is
 * checked first), which presumes later revisions extend v3.1 — confirm
 * against atomfirmware.h if a new revision is added.
 */
union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
};
38
39/*
40 * Helper function to query firmware capability
41 *
42 * @adev: amdgpu_device pointer
43 *
44 * Return firmware_capability in firmwareinfo table on success or 0 if not
45 */
46uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
47{
48 struct amdgpu_mode_info *mode_info = &adev->mode_info;
49 int index;
50 u16 data_offset, size;
51 union firmware_info *firmware_info;
52 u8 frev, crev;
53 u32 fw_cap = 0;
54
55 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
56 firmwareinfo);
57
58 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
59 index, &size, &frev, &crev, &data_offset)) {
60 /* support firmware_info 3.1 + */
61 if ((frev == 3 && crev >=1) || (frev > 3)) {
62 firmware_info = (union firmware_info *)
63 (mode_info->atom_context->bios + data_offset);
64 fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
65 }
66 }
67
68 return fw_cap;
69}
70
58ff791a
HZ
71/*
72 * Helper function to query gpu virtualizaiton capability
73 *
74 * @adev: amdgpu_device pointer
75 *
76 * Return true if gpu virtualization is supported or false if not
77 */
78bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
a5bde2f9 79{
58ff791a 80 u32 fw_cap;
a5bde2f9 81
58ff791a 82 fw_cap = adev->mode_info.firmware_flags;
a5bde2f9 83
58ff791a 84 return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
a5bde2f9
AD
85}
86
87void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
88{
89 int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
90 firmwareinfo);
91 uint16_t data_offset;
92
93 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
94 NULL, NULL, &data_offset)) {
95 struct atom_firmware_info_v3_1 *firmware_info =
96 (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
97 data_offset);
98
99 adev->bios_scratch_reg_offset =
100 le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
101 }
102}
103
a5bde2f9
AD
/*
 * Allocate the atom interpreter scratch buffer, sized either from the
 * vram_usagebyfirmware table's driver reservation or a 20KB default.
 *
 * @adev: amdgpu_device pointer
 *
 * Return 0 on success, -ENOMEM if the scratch allocation fails.
 */
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *firmware_usage;
	uint32_t start_addr, size;
	uint16_t data_offset;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
		DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
			  le32_to_cpu(firmware_usage->start_address_in_kb),
			  le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
			  le16_to_cpu(firmware_usage->used_by_driver_in_kb));

		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
		size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);

		/* the flags field is packed into the high bits of start_addr */
		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
			(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
			ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
			/* Firmware request VRAM reservation for SR-IOV */
			/* << 10 converts KB units to bytes */
			adev->mman.fw_vram_usage_start_offset = (start_addr &
				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
			adev->mman.fw_vram_usage_size = size << 10;
			/* Use the default scratch size */
			usage_bytes = 0;
		} else {
			usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
		}
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}
21f6bcb6
AD
147
/* Overlay of the integratedsysteminfo table revisions used for APUs. */
union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
	struct atom_integrated_system_info_v2_1 v21;
};
153
1e09b053
HZ
/* Overlay of the umc_info table revisions (memory controller config). */
union umc_info {
	struct atom_umc_info_v3_1 v31;
	struct atom_umc_info_v3_2 v32;
	struct atom_umc_info_v3_3 v33;
};
27e39d3d
HZ
159
/* Overlay of the vram_info table revisions used for dGPUs. */
union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
	struct atom_vram_info_header_v2_6 v26;
	struct atom_vram_info_header_v3_0 v30;
};
21f6bcb6 167
bd552027
AD
/* Overlay of the per-module vram records embedded in the vram_info table. */
union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
	struct atom_vram_module_v3_0 v30;
};
79077ee1 174
bd552027
AD
175static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
176 int atom_mem_type)
1e09b053
HZ
177{
178 int vram_type;
179
180 if (adev->flags & AMD_IS_APU) {
181 switch (atom_mem_type) {
182 case Ddr2MemType:
183 case LpDdr2MemType:
184 vram_type = AMDGPU_VRAM_TYPE_DDR2;
185 break;
186 case Ddr3MemType:
187 case LpDdr3MemType:
188 vram_type = AMDGPU_VRAM_TYPE_DDR3;
189 break;
190 case Ddr4MemType:
191 case LpDdr4MemType:
192 vram_type = AMDGPU_VRAM_TYPE_DDR4;
193 break;
15c90a1f
HR
194 case Ddr5MemType:
195 case LpDdr5MemType:
196 vram_type = AMDGPU_VRAM_TYPE_DDR5;
197 break;
1e09b053
HZ
198 default:
199 vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
200 break;
201 }
202 } else {
203 switch (atom_mem_type) {
204 case ATOM_DGPU_VRAM_TYPE_GDDR5:
205 vram_type = AMDGPU_VRAM_TYPE_GDDR5;
206 break;
801281fe 207 case ATOM_DGPU_VRAM_TYPE_HBM2:
8081f8fa 208 case ATOM_DGPU_VRAM_TYPE_HBM2E:
1e09b053
HZ
209 vram_type = AMDGPU_VRAM_TYPE_HBM;
210 break;
89d7a79c
HZ
211 case ATOM_DGPU_VRAM_TYPE_GDDR6:
212 vram_type = AMDGPU_VRAM_TYPE_GDDR6;
213 break;
1e09b053
HZ
214 default:
215 vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
216 break;
217 }
218 }
219
220 return vram_type;
221}
bd552027 222
ad02e08e
OM
223
/*
 * Query vram width, type and vendor from the vbios.
 *
 * @adev: amdgpu_device pointer
 * @vram_width: if non-NULL, receives the total bus width in bits
 * @vram_type: if non-NULL, receives an AMDGPU_VRAM_TYPE_* value
 * @vram_vendor: if non-NULL, receives the 4-bit dram vendor id
 *
 * APUs read the integratedsysteminfo table; dGPUs read the vram_info
 * table and select the populated module via a scratch register.
 *
 * Return 0 on success (or if the header cannot be parsed, in which case
 * the outputs are left untouched), -EINVAL for unknown table revisions.
 */
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (frev) {
			case 1:
				switch (crev) {
				case 11:
				case 12:
					mem_channel_number = igp_info->v11.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					/* channel width is 64 */
					if (vram_width)
						*vram_width = mem_channel_number * 64;
					mem_type = igp_info->v11.memorytype;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 2:
				switch (crev) {
				case 1:
				case 2:
					mem_channel_number = igp_info->v21.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					/* channel width is 64 */
					if (vram_width)
						*vram_width = mem_channel_number * 64;
					mem_type = igp_info->v21.memorytype;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			/* the populated vram module index is reported by the
			 * vbios in bits 23:16 of this scratch register */
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
			if (frev == 3) {
				switch (crev) {
				/* v30 */
				case 0:
					vram_module = (union vram_module *)vram_info->v30.vram_module;
					mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					mem_type = vram_info->v30.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_info->v30.channel_num;
					mem_channel_width = vram_info->v30.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					break;
				default:
					return -EINVAL;
				}
			} else if (frev == 2) {
				switch (crev) {
				/* v23 */
				case 3:
					if (module_id > vram_info->v23.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v23.vram_module;
					/* vram_module records are variable length;
					 * walk forward by each record's own size */
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v24 */
				case 4:
					if (module_id > vram_info->v24.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v24.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v10.vram_module_size);
						i++;
					}
					mem_type = vram_module->v10.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v10.channel_num;
					mem_channel_width = vram_module->v10.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v25 */
				case 5:
					if (module_id > vram_info->v25.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v25.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v11.vram_module_size);
						i++;
					}
					mem_type = vram_module->v11.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v11.channel_num;
					mem_channel_width = vram_module->v11.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v26 */
				case 6:
					if (module_id > vram_info->v26.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v26.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				default:
					return -EINVAL;
				}
			} else {
				/* invalid frev */
				return -EINVAL;
			}
		}

	}

	return 0;
}
417
511c4348
HZ
418/*
419 * Return true if vbios enabled ecc by default, if umc info table is available
420 * or false if ecc is not enabled or umc info table is not available
421 */
422bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
423{
424 struct amdgpu_mode_info *mode_info = &adev->mode_info;
425 int index;
426 u16 data_offset, size;
427 union umc_info *umc_info;
428 u8 frev, crev;
429 bool ecc_default_enabled = false;
97e27292
HZ
430 u8 umc_config;
431 u32 umc_config1;
511c4348
HZ
432
433 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
434 umc_info);
435
436 if (amdgpu_atom_parse_data_header(mode_info->atom_context,
437 index, &size, &frev, &crev, &data_offset)) {
b69d5c7e 438 if (frev == 3) {
511c4348
HZ
439 umc_info = (union umc_info *)
440 (mode_info->atom_context->bios + data_offset);
b69d5c7e
HZ
441 switch (crev) {
442 case 1:
97e27292 443 umc_config = le32_to_cpu(umc_info->v31.umc_config);
b69d5c7e 444 ecc_default_enabled =
97e27292 445 (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
b69d5c7e
HZ
446 break;
447 case 2:
97e27292 448 umc_config = le32_to_cpu(umc_info->v32.umc_config);
b69d5c7e 449 ecc_default_enabled =
97e27292 450 (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
b69d5c7e
HZ
451 break;
452 case 3:
97e27292
HZ
453 umc_config = le32_to_cpu(umc_info->v33.umc_config);
454 umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
b69d5c7e 455 ecc_default_enabled =
97e27292
HZ
456 ((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
457 (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
b69d5c7e
HZ
458 break;
459 default:
460 /* unsupported crev */
461 return false;
462 }
511c4348
HZ
463 }
464 }
465
466 return ecc_default_enabled;
467}
468
8b6da23f 469/*
698b1010
HZ
470 * Helper function to query sram ecc capablity
471 *
472 * @adev: amdgpu_device pointer
473 *
8b6da23f
HZ
474 * Return true if vbios supports sram ecc or false if not
475 */
476bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
477{
698b1010 478 u32 fw_cap;
8b6da23f 479
698b1010 480 fw_cap = adev->mode_info.firmware_flags;
8b6da23f 481
698b1010 482 return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
8b6da23f
HZ
483}
484
cffd6f9d
HZ
485/*
486 * Helper function to query dynamic boot config capability
487 *
488 * @adev: amdgpu_device pointer
489 *
490 * Return true if vbios supports dynamic boot config or false if not
491 */
492bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
493{
494 u32 fw_cap;
495
496 fw_cap = adev->mode_info.firmware_flags;
497
498 return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
499}
500
a6a355a2
LT
501/**
502 * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
bbe04dec
IB
503 * @adev: amdgpu_device pointer
504 * @i2c_address: pointer to u8; if not NULL, will contain
a6a355a2 505 * the RAS EEPROM address if the function returns true
14fb496a 506 *
a6a355a2
LT
507 * Return true if VBIOS supports RAS EEPROM address reporting,
508 * else return false. If true and @i2c_address is not NULL,
509 * will contain the RAS ROM address.
14fb496a 510 */
a6a355a2
LT
511bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
512 u8 *i2c_address)
14fb496a
JC
513{
514 struct amdgpu_mode_info *mode_info = &adev->mode_info;
515 int index;
516 u16 data_offset, size;
517 union firmware_info *firmware_info;
518 u8 frev, crev;
519
14fb496a 520 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
a6a355a2 521 firmwareinfo);
14fb496a
JC
522
523 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
a6a355a2
LT
524 index, &size, &frev, &crev,
525 &data_offset)) {
14fb496a
JC
526 /* support firmware_info 3.4 + */
527 if ((frev == 3 && crev >=4) || (frev > 3)) {
528 firmware_info = (union firmware_info *)
529 (mode_info->atom_context->bios + data_offset);
a6a355a2
LT
530 /* The ras_rom_i2c_slave_addr should ideally
531 * be a 19-bit EEPROM address, which would be
532 * used as is by the driver; see top of
533 * amdgpu_eeprom.c.
534 *
535 * When this is the case, 0 is of course a
536 * valid RAS EEPROM address, in which case,
537 * we'll drop the first "if (firm...)" and only
538 * leave the check for the pointer.
539 *
540 * The reason this works right now is because
541 * ras_rom_i2c_slave_addr contains the EEPROM
542 * device type qualifier 1010b in the top 4
543 * bits.
544 */
545 if (firmware_info->v34.ras_rom_i2c_slave_addr) {
546 if (i2c_address)
547 *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
548 return true;
549 }
14fb496a
JC
550 }
551 }
552
14fb496a
JC
553 return false;
554}
555
556
79077ee1
AD
/* Overlay of the smu_info table revisions (clock reference data). */
union smu_info {
	struct atom_smu_info_v3_1 v31;
	struct atom_smu_info_v4_0 v40;
};
561
a8d59943
HZ
/* Overlay of the gfx_info table revisions (gfx config and refclk data). */
union gfx_info {
	struct atom_gfx_info_v2_2 v22;
	struct atom_gfx_info_v2_4 v24;
	struct atom_gfx_info_v2_7 v27;
	struct atom_gfx_info_v3_0 v30;
};
568
79077ee1
AD
/*
 * Populate default clocks and PLL limits from the vbios.
 *
 * @adev: amdgpu_device pointer
 *
 * Reads bootup sclk/mclk from firmwareinfo, the system PLL refclk from
 * smu_info, the memory PLL refclk from umc_info, and (on Navi and later)
 * overrides the system refclk from gfx_info.
 *
 * Return 0 if at least one table was parsed successfully, -EINVAL otherwise.
 */
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		if (frev == 3)
			spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
		else if (frev == 4)
			spll->reference_freq = le32_to_cpu(smu_info->v40.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	/* if asic is Navi+, the rlc reference clock is used for system clock
	 * from vbios gfx_info table */
	if (adev->asic_type >= CHIP_NAVI10) {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    gfx_info);
		if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
						  &frev, &crev, &data_offset)) {
			union gfx_info *gfx_info = (union gfx_info *)
				(mode_info->atom_context->bios + data_offset);
			if ((frev == 3) ||
			    (frev == 2 && crev == 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
				ret = 0;
			} else if ((frev == 2) &&
				   (crev >= 2) &&
				   (crev != 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
				ret = 0;
			} else {
				/* an unknown gfx_info revision would leave refclk
				 * uninitialized; treat it as a fatal vbios mismatch */
				BUG();
			}
		}
	}

	return ret;
}
59b0b509 672
59b0b509
AD
/*
 * Populate adev->gfx configuration limits from the vbios gfx_info table.
 *
 * @adev: amdgpu_device pointer
 *
 * Return 0 on success, -EINVAL if the table cannot be parsed or its
 * revision is not supported.
 */
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		if (frev == 2) {
			switch (crev) {
			case 4:
				adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth =
					le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf =
					gfx_info->v24.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
				return 0;
			case 7:
				adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
				return 0;
			default:
				return -EINVAL;
			}
		} else if (frev == 3) {
			switch (crev) {
			case 0:
				/* v3.0 carries only the shader topology limits */
				adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
				return 0;
			default:
				return -EINVAL;
			}
		} else {
			return -EINVAL;
		}

	}
	return -EINVAL;
}
efe4f000
TY
744
745/*
82a52030
HZ
746 * Helper function to query two stage mem training capability
747 *
748 * @adev: amdgpu_device pointer
749 *
750 * Return true if two stage mem training is supported or false if not
efe4f000 751 */
82a52030 752bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
efe4f000 753{
82a52030 754 u32 fw_cap;
efe4f000 755
82a52030 756 fw_cap = adev->mode_info.firmware_flags;
efe4f000 757
82a52030 758 return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
efe4f000
TY
759}
760
9a244ebe
HZ
761int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
762{
763 struct atom_context *ctx = adev->mode_info.atom_context;
764 union firmware_info *firmware_info;
765 int index;
766 u16 data_offset, size;
767 u8 frev, crev;
768 int fw_reserved_fb_size;
769
770 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
771 firmwareinfo);
772
773 if (!amdgpu_atom_parse_data_header(ctx, index, &size,
774 &frev, &crev, &data_offset))
775 /* fail to parse data_header */
776 return 0;
777
778 firmware_info = (union firmware_info *)(ctx->bios + data_offset);
779
780 if (frev !=3)
781 return -EINVAL;
782
783 switch (crev) {
784 case 4:
785 fw_reserved_fb_size =
786 (firmware_info->v34.fw_reserved_size_in_kb << 10);
787 break;
788 default:
789 fw_reserved_fb_size = 0;
790 break;
791 }
792
793 return fw_reserved_fb_size;
794}
ba75f6eb
HZ
795
/*
 * Helper function to execute asic_init table
 *
 * @adev: amdgpu_device pointer
 * @fb_reset: flag to indicate whether fb is reset or not
 *
 * Return 0 if succeed, otherwise failed
 */
int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx;
	uint8_t frev, crev;
	uint16_t data_offset;
	uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
	struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
	int index;

	if (!mode_info)
		return -EINVAL;

	ctx = mode_info->atom_context;
	if (!ctx)
		return -EINVAL;

	/* query bootup sclk/mclk from firmware_info table */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(ctx, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(ctx->bios +
						data_offset);

		bootup_sclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		bootup_mclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
	} else {
		return -EINVAL;
	}

	/* build the asic_init parameter block and run the command table */
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    asic_init);
	if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
		if (frev == 2 && crev >= 1) {
			memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
			asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
			asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
			asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
			/* when fb contents survived, exit self refresh instead
			 * of reinitializing dram */
			if (!fb_reset)
				asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
			else
				asic_init_ps_v2_1.param.memparam.memflag = 0;
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1);
}