drm/amdgpu/atomfirmware: use proper index for querying vram type (v3)
[linux-2.6-block.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_atomfirmware.c
CommitLineData
a5bde2f9
AD
1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
fdf2f6c5 23
a5bde2f9
AD
24#include <drm/amdgpu_drm.h>
25#include "amdgpu.h"
26#include "atomfirmware.h"
27#include "amdgpu_atomfirmware.h"
28#include "atom.h"
692bb1ac 29#include "atombios.h"
a5bde2f9 30
a5bde2f9
AD
31bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
32{
33 int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
34 firmwareinfo);
35 uint16_t data_offset;
36
37 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
38 NULL, NULL, &data_offset)) {
39 struct atom_firmware_info_v3_1 *firmware_info =
40 (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
41 data_offset);
42
43 if (le32_to_cpu(firmware_info->firmware_capability) &
44 ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
45 return true;
46 }
47 return false;
48}
49
50void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
51{
52 int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
53 firmwareinfo);
54 uint16_t data_offset;
55
56 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
57 NULL, NULL, &data_offset)) {
58 struct atom_firmware_info_v3_1 *firmware_info =
59 (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
60 data_offset);
61
62 adev->bios_scratch_reg_offset =
63 le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
64 }
65}
66
a5bde2f9
AD
67int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
68{
69 struct atom_context *ctx = adev->mode_info.atom_context;
70 int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
71 vram_usagebyfirmware);
24738d7c
ML
72 struct vram_usagebyfirmware_v2_1 * firmware_usage;
73 uint32_t start_addr, size;
a5bde2f9
AD
74 uint16_t data_offset;
75 int usage_bytes = 0;
76
77 if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
24738d7c 78 firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
a5bde2f9
AD
79 DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
80 le32_to_cpu(firmware_usage->start_address_in_kb),
81 le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
82 le16_to_cpu(firmware_usage->used_by_driver_in_kb));
83
24738d7c
ML
84 start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
85 size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);
86
87 if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
88 (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
89 ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
90 /* Firmware request VRAM reservation for SR-IOV */
91 adev->fw_vram_usage.start_offset = (start_addr &
92 (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
93 adev->fw_vram_usage.size = size << 10;
94 /* Use the default scratch size */
95 usage_bytes = 0;
96 } else {
97 usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
98 }
a5bde2f9
AD
99 }
100 ctx->scratch_size_bytes = 0;
101 if (usage_bytes == 0)
102 usage_bytes = 20 * 1024;
103 /* allocate some scratch memory */
104 ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
105 if (!ctx->scratch)
106 return -ENOMEM;
107 ctx->scratch_size_bytes = usage_bytes;
108 return 0;
109}
21f6bcb6
AD
110
111union igp_info {
112 struct atom_integrated_system_info_v1_11 v11;
113};
114
1e09b053
HZ
115union umc_info {
116 struct atom_umc_info_v3_1 v31;
117};
27e39d3d
HZ
118
119union vram_info {
120 struct atom_vram_info_header_v2_3 v23;
89d7a79c 121 struct atom_vram_info_header_v2_4 v24;
27e39d3d 122};
21f6bcb6 123
bd552027
AD
124union vram_module {
125 struct atom_vram_module_v9 v9;
126 struct atom_vram_module_v10 v10;
127};
79077ee1 128
bd552027
AD
129static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
130 int atom_mem_type)
1e09b053
HZ
131{
132 int vram_type;
133
134 if (adev->flags & AMD_IS_APU) {
135 switch (atom_mem_type) {
136 case Ddr2MemType:
137 case LpDdr2MemType:
138 vram_type = AMDGPU_VRAM_TYPE_DDR2;
139 break;
140 case Ddr3MemType:
141 case LpDdr3MemType:
142 vram_type = AMDGPU_VRAM_TYPE_DDR3;
143 break;
144 case Ddr4MemType:
145 case LpDdr4MemType:
146 vram_type = AMDGPU_VRAM_TYPE_DDR4;
147 break;
148 default:
149 vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
150 break;
151 }
152 } else {
153 switch (atom_mem_type) {
154 case ATOM_DGPU_VRAM_TYPE_GDDR5:
155 vram_type = AMDGPU_VRAM_TYPE_GDDR5;
156 break;
801281fe 157 case ATOM_DGPU_VRAM_TYPE_HBM2:
1e09b053
HZ
158 vram_type = AMDGPU_VRAM_TYPE_HBM;
159 break;
89d7a79c
HZ
160 case ATOM_DGPU_VRAM_TYPE_GDDR6:
161 vram_type = AMDGPU_VRAM_TYPE_GDDR6;
162 break;
1e09b053
HZ
163 default:
164 vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
165 break;
166 }
167 }
168
169 return vram_type;
170}
bd552027
AD
171
172static int
173amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
174 int *vram_width, int *vram_type)
1e09b053
HZ
175{
176 struct amdgpu_mode_info *mode_info = &adev->mode_info;
bd552027 177 int index, i = 0;
1e09b053
HZ
178 u16 data_offset, size;
179 union igp_info *igp_info;
27e39d3d 180 union vram_info *vram_info;
bd552027 181 union vram_module *vram_module;
1e09b053
HZ
182 u8 frev, crev;
183 u8 mem_type;
bd552027
AD
184 u32 mem_channel_number;
185 u32 mem_channel_width;
186 u32 module_id;
187
1e09b053
HZ
188
189 if (adev->flags & AMD_IS_APU)
190 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
191 integratedsysteminfo);
192 else
193 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
27e39d3d 194 vram_info);
bd552027 195
1e09b053
HZ
196 if (amdgpu_atom_parse_data_header(mode_info->atom_context,
197 index, &size,
198 &frev, &crev, &data_offset)) {
199 if (adev->flags & AMD_IS_APU) {
200 igp_info = (union igp_info *)
201 (mode_info->atom_context->bios + data_offset);
202 switch (crev) {
203 case 11:
bd552027
AD
204 mem_channel_number = igp_info->v11.umachannelnumber;
205 /* channel width is 64 */
206 if (vram_width)
207 *vram_width = mem_channel_number * 64;
1e09b053 208 mem_type = igp_info->v11.memorytype;
bd552027
AD
209 if (vram_type)
210 *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
211 break;
1e09b053 212 default:
bd552027 213 return -EINVAL;
1e09b053
HZ
214 }
215 } else {
27e39d3d 216 vram_info = (union vram_info *)
1e09b053 217 (mode_info->atom_context->bios + data_offset);
bd552027 218 module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
1e09b053 219 switch (crev) {
27e39d3d 220 case 3:
bd552027
AD
221 if (module_id > vram_info->v23.vram_module_num)
222 module_id = 0;
223 vram_module = (union vram_module *)vram_info->v23.vram_module;
224 while (i < module_id) {
225 vram_module = (union vram_module *)
226 ((u8 *)vram_module + vram_module->v9.vram_module_size);
227 i++;
228 }
229 mem_type = vram_module->v9.memory_type;
230 if (vram_type)
231 *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
232 mem_channel_number = vram_module->v9.channel_num;
233 mem_channel_width = vram_module->v9.channel_width;
234 if (vram_width)
235 *vram_width = mem_channel_number * (1 << mem_channel_width);
236 break;
89d7a79c 237 case 4:
bd552027
AD
238 if (module_id > vram_info->v24.vram_module_num)
239 module_id = 0;
240 vram_module = (union vram_module *)vram_info->v24.vram_module;
241 while (i < module_id) {
242 vram_module = (union vram_module *)
243 ((u8 *)vram_module + vram_module->v10.vram_module_size);
244 i++;
245 }
246 mem_type = vram_module->v10.memory_type;
247 if (vram_type)
248 *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
249 mem_channel_number = vram_module->v10.channel_num;
250 mem_channel_width = vram_module->v10.channel_width;
251 if (vram_width)
252 *vram_width = mem_channel_number * (1 << mem_channel_width);
253 break;
1e09b053 254 default:
bd552027 255 return -EINVAL;
1e09b053
HZ
256 }
257 }
bd552027 258
1e09b053
HZ
259 }
260
261 return 0;
262}
263
bd552027
AD
/*
 * Return vram width from integrated system info table, if available,
 * or 0 if not.
 */
int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
{
	int vram_width = 0, vram_type = 0;

	if (amdgpu_atomfirmware_get_vram_info(adev, &vram_width, &vram_type))
		return 0;
	return vram_width;
}
277
/*
 * Return vram type from either integrated system info table
 * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
 */
int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
{
	int vram_width = 0, vram_type = 0;

	if (amdgpu_atomfirmware_get_vram_info(adev, &vram_width, &vram_type))
		return 0;
	return vram_type;
}
291
511c4348
HZ
292/*
293 * Return true if vbios enabled ecc by default, if umc info table is available
294 * or false if ecc is not enabled or umc info table is not available
295 */
296bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
297{
298 struct amdgpu_mode_info *mode_info = &adev->mode_info;
299 int index;
300 u16 data_offset, size;
301 union umc_info *umc_info;
302 u8 frev, crev;
303 bool ecc_default_enabled = false;
304
305 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
306 umc_info);
307
308 if (amdgpu_atom_parse_data_header(mode_info->atom_context,
309 index, &size, &frev, &crev, &data_offset)) {
310 /* support umc_info 3.1+ */
311 if ((frev == 3 && crev >= 1) || (frev > 3)) {
312 umc_info = (union umc_info *)
313 (mode_info->atom_context->bios + data_offset);
314 ecc_default_enabled =
315 (le32_to_cpu(umc_info->v31.umc_config) &
316 UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
317 }
318 }
319
320 return ecc_default_enabled;
321}
322
79077ee1
AD
323union firmware_info {
324 struct atom_firmware_info_v3_1 v31;
325};
326
8b6da23f
HZ
327/*
328 * Return true if vbios supports sram ecc or false if not
329 */
330bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
331{
332 struct amdgpu_mode_info *mode_info = &adev->mode_info;
333 int index;
334 u16 data_offset, size;
335 union firmware_info *firmware_info;
336 u8 frev, crev;
337 bool sram_ecc_supported = false;
338
339 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
340 firmwareinfo);
341
342 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
343 index, &size, &frev, &crev, &data_offset)) {
344 /* support firmware_info 3.1 + */
345 if ((frev == 3 && crev >=1) || (frev > 3)) {
346 firmware_info = (union firmware_info *)
347 (mode_info->atom_context->bios + data_offset);
348 sram_ecc_supported =
349 (le32_to_cpu(firmware_info->v31.firmware_capability) &
350 ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
351 }
352 }
353
354 return sram_ecc_supported;
355}
356
79077ee1
AD
357union smu_info {
358 struct atom_smu_info_v3_1 v31;
359};
360
79077ee1
AD
361int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
362{
363 struct amdgpu_mode_info *mode_info = &adev->mode_info;
364 struct amdgpu_pll *spll = &adev->clock.spll;
365 struct amdgpu_pll *mpll = &adev->clock.mpll;
366 uint8_t frev, crev;
367 uint16_t data_offset;
368 int ret = -EINVAL, index;
369
370 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
371 firmwareinfo);
372 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
373 &frev, &crev, &data_offset)) {
374 union firmware_info *firmware_info =
375 (union firmware_info *)(mode_info->atom_context->bios +
376 data_offset);
377
378 adev->clock.default_sclk =
379 le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
380 adev->clock.default_mclk =
381 le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
382
383 adev->pm.current_sclk = adev->clock.default_sclk;
384 adev->pm.current_mclk = adev->clock.default_mclk;
385
386 /* not technically a clock, but... */
387 adev->mode_info.firmware_flags =
388 le32_to_cpu(firmware_info->v31.firmware_capability);
389
390 ret = 0;
391 }
392
393 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
394 smu_info);
395 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
396 &frev, &crev, &data_offset)) {
397 union smu_info *smu_info =
398 (union smu_info *)(mode_info->atom_context->bios +
399 data_offset);
400
401 /* system clock */
402 spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
403
404 spll->reference_div = 0;
405 spll->min_post_div = 1;
406 spll->max_post_div = 1;
407 spll->min_ref_div = 2;
408 spll->max_ref_div = 0xff;
409 spll->min_feedback_div = 4;
410 spll->max_feedback_div = 0xff;
411 spll->best_vco = 0;
412
413 ret = 0;
414 }
415
416 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
417 umc_info);
418 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
419 &frev, &crev, &data_offset)) {
420 union umc_info *umc_info =
421 (union umc_info *)(mode_info->atom_context->bios +
422 data_offset);
423
424 /* memory clock */
425 mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);
426
427 mpll->reference_div = 0;
428 mpll->min_post_div = 1;
429 mpll->max_post_div = 1;
430 mpll->min_ref_div = 2;
431 mpll->max_ref_div = 0xff;
432 mpll->min_feedback_div = 4;
433 mpll->max_feedback_div = 0xff;
434 mpll->best_vco = 0;
435
436 ret = 0;
437 }
438
439 return ret;
440}
59b0b509
AD
441
442union gfx_info {
443 struct atom_gfx_info_v2_4 v24;
444};
445
446int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
447{
448 struct amdgpu_mode_info *mode_info = &adev->mode_info;
449 int index;
450 uint8_t frev, crev;
451 uint16_t data_offset;
452
453 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
454 gfx_info);
455 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
456 &frev, &crev, &data_offset)) {
457 union gfx_info *gfx_info = (union gfx_info *)
458 (mode_info->atom_context->bios + data_offset);
459 switch (crev) {
460 case 4:
0ae6afbf
HR
461 adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
462 adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
463 adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
464 adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
465 adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
59b0b509
AD
466 adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
467 adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
468 adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
469 adev->gfx.config.gs_prim_buffer_depth =
470 le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
471 adev->gfx.config.double_offchip_lds_buf =
472 gfx_info->v24.gc_double_offchip_lds_buffer;
f9fb22a2
SL
473 adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
474 adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
475 adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
59b0b509
AD
476 adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
477 return 0;
478 default:
479 return -EINVAL;
480 }
481
482 }
483 return -EINVAL;
484}