Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright 2014 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | */ | |
23 | ||
24 | #include <linux/firmware.h> | |
25 | #include <linux/slab.h> | |
26 | #include <linux/module.h> | |
27 | #include <drm/drmP.h> | |
28 | #include "amdgpu.h" | |
29 | #include "amdgpu_ucode.h" | |
30 | ||
/* Dump the fields common to every amdgpu firmware header at DRM debug level. */
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
	/* All header fields are stored little-endian in the firmware blob. */
	DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
	DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
	DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
	DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
	DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
	DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
	DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
	DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
	DRM_DEBUG("ucode_array_offset_bytes: %u\n",
		  le32_to_cpu(hdr->ucode_array_offset_bytes));
	DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
}
45 | ||
/*
 * Dump an MC (memory controller) firmware header at DRM debug level.
 * Only header version 1.x has a known extended layout; anything else
 * is reported as an error.
 */
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("MC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		/* v1.0 embeds the common header; recover the enclosing struct */
		const struct mc_firmware_header_v1_0 *mc_hdr =
			container_of(hdr, struct mc_firmware_header_v1_0, header);

		DRM_DEBUG("io_debug_size_bytes: %u\n",
			  le32_to_cpu(mc_hdr->io_debug_size_bytes));
		DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
			  le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
	} else {
		DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
	}
}
66 | ||
/*
 * Dump an SMC (system management controller) firmware header at DRM
 * debug level.  Only header version 1.x is understood.
 */
void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("SMC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		/* v1.0 embeds the common header; recover the enclosing struct */
		const struct smc_firmware_header_v1_0 *smc_hdr =
			container_of(hdr, struct smc_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
	} else {
		DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
	}
}
84 | ||
/*
 * Dump a GFX (CP/graphics microengine) firmware header at DRM debug
 * level.  Only header version 1.x is understood.
 */
void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("GFX\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		/* v1.0 embeds the common header; recover the enclosing struct */
		const struct gfx_firmware_header_v1_0 *gfx_hdr =
			container_of(hdr, struct gfx_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(gfx_hdr->ucode_feature_version));
		/* jt_offset/jt_size describe the jump table appended to the image */
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
	} else {
		DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
	}
}
105 | ||
106 | void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr) | |
107 | { | |
108 | uint16_t version_major = le16_to_cpu(hdr->header_version_major); | |
109 | uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); | |
110 | ||
111 | DRM_DEBUG("RLC\n"); | |
112 | amdgpu_ucode_print_common_hdr(hdr); | |
113 | ||
114 | if (version_major == 1) { | |
115 | const struct rlc_firmware_header_v1_0 *rlc_hdr = | |
116 | container_of(hdr, struct rlc_firmware_header_v1_0, header); | |
117 | ||
118 | DRM_DEBUG("ucode_feature_version: %u\n", | |
119 | le32_to_cpu(rlc_hdr->ucode_feature_version)); | |
120 | DRM_DEBUG("save_and_restore_offset: %u\n", | |
121 | le32_to_cpu(rlc_hdr->save_and_restore_offset)); | |
122 | DRM_DEBUG("clear_state_descriptor_offset: %u\n", | |
123 | le32_to_cpu(rlc_hdr->clear_state_descriptor_offset)); | |
124 | DRM_DEBUG("avail_scratch_ram_locations: %u\n", | |
125 | le32_to_cpu(rlc_hdr->avail_scratch_ram_locations)); | |
126 | DRM_DEBUG("master_pkt_description_offset: %u\n", | |
127 | le32_to_cpu(rlc_hdr->master_pkt_description_offset)); | |
128 | } else if (version_major == 2) { | |
129 | const struct rlc_firmware_header_v2_0 *rlc_hdr = | |
130 | container_of(hdr, struct rlc_firmware_header_v2_0, header); | |
131 | ||
132 | DRM_DEBUG("ucode_feature_version: %u\n", | |
133 | le32_to_cpu(rlc_hdr->ucode_feature_version)); | |
134 | DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset)); | |
135 | DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size)); | |
136 | DRM_DEBUG("save_and_restore_offset: %u\n", | |
137 | le32_to_cpu(rlc_hdr->save_and_restore_offset)); | |
138 | DRM_DEBUG("clear_state_descriptor_offset: %u\n", | |
139 | le32_to_cpu(rlc_hdr->clear_state_descriptor_offset)); | |
140 | DRM_DEBUG("avail_scratch_ram_locations: %u\n", | |
141 | le32_to_cpu(rlc_hdr->avail_scratch_ram_locations)); | |
142 | DRM_DEBUG("reg_restore_list_size: %u\n", | |
143 | le32_to_cpu(rlc_hdr->reg_restore_list_size)); | |
144 | DRM_DEBUG("reg_list_format_start: %u\n", | |
145 | le32_to_cpu(rlc_hdr->reg_list_format_start)); | |
146 | DRM_DEBUG("reg_list_format_separate_start: %u\n", | |
147 | le32_to_cpu(rlc_hdr->reg_list_format_separate_start)); | |
148 | DRM_DEBUG("starting_offsets_start: %u\n", | |
149 | le32_to_cpu(rlc_hdr->starting_offsets_start)); | |
150 | DRM_DEBUG("reg_list_format_size_bytes: %u\n", | |
151 | le32_to_cpu(rlc_hdr->reg_list_format_size_bytes)); | |
152 | DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n", | |
153 | le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); | |
154 | DRM_DEBUG("reg_list_size_bytes: %u\n", | |
155 | le32_to_cpu(rlc_hdr->reg_list_size_bytes)); | |
156 | DRM_DEBUG("reg_list_array_offset_bytes: %u\n", | |
157 | le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); | |
158 | DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n", | |
159 | le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes)); | |
160 | DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n", | |
161 | le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes)); | |
162 | DRM_DEBUG("reg_list_separate_size_bytes: %u\n", | |
163 | le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes)); | |
164 | DRM_DEBUG("reg_list_separate_size_bytes: %u\n", | |
165 | le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes)); | |
166 | } else { | |
167 | DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor); | |
168 | } | |
169 | } | |
170 | ||
/*
 * Dump an SDMA firmware header at DRM debug level.  Header version
 * 1.0 is the base layout; 1.1+ appends a digest_size field.
 */
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("SDMA\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		/* v1.0 embeds the common header; recover the enclosing struct */
		const struct sdma_firmware_header_v1_0 *sdma_hdr =
			container_of(hdr, struct sdma_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_feature_version));
		DRM_DEBUG("ucode_change_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_change_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
		if (version_minor >= 1) {
			/* v1.1 wraps v1.0 and adds the digest size */
			const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr =
				container_of(sdma_hdr, struct sdma_firmware_header_v1_1, v1_0);
			DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr->digest_size));
		}
	} else {
		DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
			  version_major, version_minor);
	}
}
199 | ||
8ae1a336 AD |
/*
 * Dump a GPU_INFO firmware header at DRM debug level.  Only header
 * version 1.x is understood.
 */
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("GPU_INFO\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		/* v1.0 embeds the common header; recover the enclosing struct */
		const struct gpu_info_firmware_header_v1_0 *gpu_info_hdr =
			container_of(hdr, struct gpu_info_firmware_header_v1_0, header);

		/* payload format version, distinct from the header version above */
		DRM_DEBUG("version_major: %u\n",
			  le16_to_cpu(gpu_info_hdr->version_major));
		DRM_DEBUG("version_minor: %u\n",
			  le16_to_cpu(gpu_info_hdr->version_minor));
	} else {
		DRM_ERROR("Unknown gpu_info ucode version: %u.%u\n", version_major, version_minor);
	}
}
220 | ||
d38ceaf9 AD |
221 | int amdgpu_ucode_validate(const struct firmware *fw) |
222 | { | |
223 | const struct common_firmware_header *hdr = | |
224 | (const struct common_firmware_header *)fw->data; | |
225 | ||
226 | if (fw->size == le32_to_cpu(hdr->size_bytes)) | |
227 | return 0; | |
228 | ||
229 | return -EINVAL; | |
230 | } | |
231 | ||
232 | bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr, | |
233 | uint16_t hdr_major, uint16_t hdr_minor) | |
234 | { | |
235 | if ((hdr->common.header_version_major == hdr_major) && | |
236 | (hdr->common.header_version_minor == hdr_minor)) | |
237 | return false; | |
238 | return true; | |
239 | } | |
240 | ||
e635ee07 HR |
241 | enum amdgpu_firmware_load_type |
242 | amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) | |
243 | { | |
244 | switch (adev->asic_type) { | |
245 | #ifdef CONFIG_DRM_AMDGPU_SI | |
246 | case CHIP_TAHITI: | |
247 | case CHIP_PITCAIRN: | |
248 | case CHIP_VERDE: | |
249 | case CHIP_OLAND: | |
250 | return AMDGPU_FW_LOAD_DIRECT; | |
251 | #endif | |
252 | #ifdef CONFIG_DRM_AMDGPU_CIK | |
253 | case CHIP_BONAIRE: | |
254 | case CHIP_KAVERI: | |
255 | case CHIP_KABINI: | |
256 | case CHIP_HAWAII: | |
257 | case CHIP_MULLINS: | |
258 | return AMDGPU_FW_LOAD_DIRECT; | |
259 | #endif | |
260 | case CHIP_TOPAZ: | |
261 | case CHIP_TONGA: | |
262 | case CHIP_FIJI: | |
263 | case CHIP_CARRIZO: | |
264 | case CHIP_STONEY: | |
265 | case CHIP_POLARIS10: | |
266 | case CHIP_POLARIS11: | |
267 | case CHIP_POLARIS12: | |
268 | if (!load_type) | |
269 | return AMDGPU_FW_LOAD_DIRECT; | |
270 | else | |
271 | return AMDGPU_FW_LOAD_SMU; | |
272 | case CHIP_VEGA10: | |
273 | if (!load_type) | |
274 | return AMDGPU_FW_LOAD_DIRECT; | |
275 | else | |
276 | return AMDGPU_FW_LOAD_PSP; | |
4456ef4e CZ |
277 | case CHIP_RAVEN: |
278 | #if 0 | |
279 | if (!load_type) | |
280 | return AMDGPU_FW_LOAD_DIRECT; | |
281 | else | |
282 | return AMDGPU_FW_LOAD_PSP; | |
283 | #else | |
284 | return AMDGPU_FW_LOAD_DIRECT; | |
285 | #endif | |
e635ee07 HR |
286 | default: |
287 | DRM_ERROR("Unknow firmware load type\n"); | |
288 | } | |
289 | ||
290 | return AMDGPU_FW_LOAD_DIRECT; | |
291 | } | |
292 | ||
2445b227 HR |
/*
 * Copy one firmware image into its slot in the shared firmware buffer.
 *
 * @ucode:   firmware slot; mc_addr/kaddr/ucode_size are filled in here
 * @mc_addr: GPU (MC) address of this slot inside the firmware bo
 * @kptr:    CPU mapping of the same slot
 *
 * For PSP loading, the CP MEC images are split: the main image is
 * copied without its trailing jump table, and the *_JT entries copy
 * only the jump table.  All other combinations copy the whole payload.
 *
 * Returns 0 (also when there is no firmware to place).
 */
static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
				       struct amdgpu_firmware_info *ucode,
				       uint64_t mc_addr, void *kptr)
{
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;

	if (NULL == ucode->fw)
		return 0;

	ucode->mc_addr = mc_addr;
	ucode->kaddr = kptr;

	/* STORAGE entries reserve space only; there is no image to copy */
	if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
		return 0;

	header = (const struct common_firmware_header *)ucode->fw->data;

	/* cp_hdr is only dereferenced for the CP MEC cases below, where the
	 * blob is known to carry a gfx v1.0 header */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
	    (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT)) {
		/* common case: copy the full ucode payload */
		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);

		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
					      le32_to_cpu(header->ucode_array_offset_bytes)),
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1 ||
		   ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2) {
		/* PSP load, MEC main image: drop the jump table (jt_size is
		 * in dwords, hence * 4) from the end of the payload */
		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
			le32_to_cpu(cp_hdr->jt_size) * 4;

		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
					      le32_to_cpu(header->ucode_array_offset_bytes)),
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
		   ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) {
		/* PSP load, jump-table entry: copy only the jump table */
		ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;

		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
					      le32_to_cpu(header->ucode_array_offset_bytes) +
					      le32_to_cpu(cp_hdr->jt_offset) * 4),
		       ucode->ucode_size);
	}

	return 0;
}
343 | ||
4c2b2453 ML |
344 | static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, |
345 | uint64_t mc_addr, void *kptr) | |
346 | { | |
347 | const struct gfx_firmware_header_v1_0 *header = NULL; | |
348 | const struct common_firmware_header *comm_hdr = NULL; | |
349 | uint8_t* src_addr = NULL; | |
350 | uint8_t* dst_addr = NULL; | |
351 | ||
352 | if (NULL == ucode->fw) | |
353 | return 0; | |
354 | ||
355 | comm_hdr = (const struct common_firmware_header *)ucode->fw->data; | |
356 | header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; | |
357 | dst_addr = ucode->kaddr + | |
358 | ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes), | |
359 | PAGE_SIZE); | |
360 | src_addr = (uint8_t *)ucode->fw->data + | |
361 | le32_to_cpu(comm_hdr->ucode_array_offset_bytes) + | |
362 | (le32_to_cpu(header->jt_offset) * 4); | |
363 | memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4); | |
364 | ||
2445b227 HR |
365 | ucode->ucode_size += le32_to_cpu(header->jt_size) * 4; |
366 | ||
4c2b2453 ML |
367 | return 0; |
368 | } | |
369 | ||
d38ceaf9 AD |
/*
 * Allocate, pin and map the single buffer object that holds every
 * firmware image, then copy each requested ucode into it.
 *
 * Buffer placement is VRAM under SR-IOV, GTT otherwise.  On any
 * failure the partially acquired resources are released via the goto
 * chain and the load type is downgraded to direct loading so the
 * driver can still bring the ASIC up.
 *
 * Returns 0 on success or a negative errno.
 */
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
	struct amdgpu_bo **bo = &adev->firmware.fw_buf;
	uint64_t fw_mc_addr;
	void *fw_buf_ptr = NULL;
	uint64_t fw_offset = 0;
	int i, err;
	struct amdgpu_firmware_info *ucode = NULL;
	/* NOTE(review): header is assigned in the loop below but never
	 * read afterwards — looks like leftover from an older revision */
	const struct common_firmware_header *header = NULL;

	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
			       amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
			       0, NULL, NULL, bo);
	if (err) {
		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
		goto failed;
	}

	err = amdgpu_bo_reserve(*bo, false);
	if (err) {
		dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
		goto failed_reserve;
	}

	/* pin domain must match the create domain above */
	err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
			    &fw_mc_addr);
	if (err) {
		dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
		goto failed_pin;
	}

	err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
	if (err) {
		dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
		goto failed_kmap;
	}

	amdgpu_bo_unreserve(*bo);

	memset(fw_buf_ptr, 0, adev->firmware.fw_size);

	/*
	 * if SMU loaded firmware, it needn't add SMC, UVD, and VCE
	 * ucode info here
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (amdgpu_sriov_vf(adev))
			adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 3;
		else
			adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
	} else {
		adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
	}

	/* lay the images out back to back, each page-aligned */
	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];
		if (ucode->fw) {
			header = (const struct common_firmware_header *)ucode->fw->data;
			amdgpu_ucode_init_single_fw(adev, ucode, fw_mc_addr + fw_offset,
						    (void *)((uint8_t *)fw_buf_ptr + fw_offset));
			if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
			    adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				/* non-PSP load: the CP jump table lives in its
				 * own page(s) right behind the MEC1 image */
				const struct gfx_firmware_header_v1_0 *cp_hdr;
				cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
				amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
						    fw_buf_ptr + fw_offset);
				fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
			}
			fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
		}
	}
	return 0;

failed_kmap:
	amdgpu_bo_unpin(*bo);
failed_pin:
	amdgpu_bo_unreserve(*bo);
failed_reserve:
	amdgpu_bo_unref(bo);
failed:
	/* fall back to direct loading so init can still proceed */
	if (err)
		adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;

	return err;
}
455 | ||
456 | int amdgpu_ucode_fini_bo(struct amdgpu_device *adev) | |
457 | { | |
458 | int i; | |
459 | struct amdgpu_firmware_info *ucode = NULL; | |
460 | ||
2445b227 | 461 | for (i = 0; i < adev->firmware.max_ucodes; i++) { |
d38ceaf9 AD |
462 | ucode = &adev->firmware.ucode[i]; |
463 | if (ucode->fw) { | |
464 | ucode->mc_addr = 0; | |
465 | ucode->kaddr = NULL; | |
466 | } | |
467 | } | |
468 | amdgpu_bo_unref(&adev->firmware.fw_buf); | |
469 | adev->firmware.fw_buf = NULL; | |
470 | ||
471 | return 0; | |
472 | } |