/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"

#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
	do { \
		vf2pf_info->ucode_info[ucode].id = ucode; \
		vf2pf_info->ucode_info[ucode].version = ver; \
	} while (0)

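/*
 * For illustration (not part of the original source): invoked as
 * POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version)
 * the macro expands to the two assignments
 *
 *	vf2pf_info->ucode_info[AMD_SRIOV_UCODE_ID_VCE].id = AMD_SRIOV_UCODE_ID_VCE;
 *	vf2pf_info->ucode_info[AMD_SRIOV_UCODE_ID_VCE].version = adev->vce.fw_version;
 *
 * i.e. the ucode enum value doubles as both the array index and the recorded
 * id. The do { } while (0) wrapper keeps the expansion a single statement so
 * it composes safely with if/else.
 */
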
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/* By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor. Choose SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);

	/* enable virtual display */
	if (adev->asic_type != CHIP_ALDEBARAN &&
	    adev->asic_type != CHIP_ARCTURUS) {
		if (adev->mode_info.num_crtc == 0)
			adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
	}
	ddev->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
	dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}

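/*
 * Usage sketch (an assumption, not taken from this file): GMC code uses this
 * helper under SR-IOV to atomically program a VM invalidation request and
 * poll its ack register through the KIQ, e.g.
 *
 *	amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg,
 *					   1 << vmid, 1 << vmid);
 *
 * which emits "write reg0, then wait until (reg1 & mask) == ref" as a single
 * KIQ packet, avoiding races with other VFs sharing the GPU.
 */
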
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When starting to init/fini the driver, full GPU access must be requested first.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When finishing driver init/fini, full GPU access must be released.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset completion
 * @adev:	amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * MM table is used by UVD and VCE for their initialization.
 * Return: Zero on allocation success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

unsigned int amd_sriov_msg_checksum(void *obj,
				    unsigned long obj_size,
				    unsigned int key,
				    unsigned int checksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the checksum itself */
	pos = (unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= *(pos + i);
	return ret;
}

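/*
 * Worked example (illustrative, not part of the original source): the message
 * buffer contains its own checksum field, so the caller passes the stored
 * checksum in so that its bytes can be subtracted back out:
 *
 *	result = key + (sum of all bytes in obj)
 *	       - (sum of the bytes of the stored checksum value)
 *
 * For verification, pass the checksum read from the message; for generation
 * over a buffer whose checksum field is still zero, pass key = 0 and
 * checksum = 0, as amdgpu_virt_write_vf2pf_data() does below.
 */
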
static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
	/* GPU will be marked bad on the host if the bad page count exceeds 10,
	 * so allocating 512 entries is enough.
	 */
	unsigned int align_space = 512;
	void *bps = NULL;
	struct amdgpu_bo **bps_bo = NULL;

	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
	if (!*data)
		goto data_failure;

	bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
	if (!bps)
		goto bps_failure;

	bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
	if (!bps_bo)
		goto bps_bo_failure;

	(*data)->bps = bps;
	(*data)->bps_bo = bps_bo;
	(*data)->count = 0;
	(*data)->last_reserved = 0;

	virt->ras_init_done = true;

	return 0;

bps_bo_failure:
	kfree(bps);
bps_failure:
	kfree(*data);
data_failure:
	return -ENOMEM;
}

static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];
		amdgpu_bo_free_kernel(&bo, NULL, NULL);
		data->bps_bo[i] = bo;
		data->last_reserved = i;
	}
}

void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	virt->ras_init_done = false;

	if (!data)
		return;

	amdgpu_virt_ras_release_bp(adev);

	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	virt->virt_eh_data = NULL;
}

static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
				    struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	if (!data)
		return;

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
}

static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo = NULL;
	uint64_t bp;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* There are two cases of reserve error that should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		 *    for one page);
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_VRAM,
					       &bo, NULL))
			DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);

		data->bps_bo[i] = bo;
		data->last_reserved = i + 1;
		bo = NULL;
	}
}

static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
					   uint64_t retired_page)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	int i;

	if (!data)
		return true;

	for (i = 0; i < data->count; i++)
		if (retired_page == data->bps[i].retired_page)
			return true;

	return false;
}

static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
				     uint64_t bp_block_offset, uint32_t bp_block_size)
{
	struct eeprom_table_record bp;
	uint64_t retired_page;
	uint32_t bp_idx, bp_cnt;

	if (bp_block_size) {
		bp_cnt = bp_block_size / sizeof(uint64_t);
		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
			retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
					bp_block_offset + bp_idx * sizeof(uint64_t));
			bp.retired_page = retired_page;

			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
				continue;

			amdgpu_virt_ras_add_bps(adev, &bp, 1);

			amdgpu_virt_ras_reserve_bps(adev);
		}
	}
}

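/*
 * Illustrative layout (an assumption based on the parsing above): the bad
 * page block the host places in reserved VRAM is a packed array of 64-bit
 * retired page numbers, so for
 *
 *	bp_block_offset = 0x1000, bp_block_size = 24
 *
 * the loop reads three uint64_t entries at fw_vram_usage_va + 0x1000,
 * +0x1008 and +0x1010, skips any page already tracked, and reserves the
 * rest so the guest never allocates VRAM on a known-bad page.
 */
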
static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
	uint32_t checksum;
	uint32_t checkval;

	uint32_t i;
	uint32_t tmp;

	if (adev->virt.fw_reserve.p_pf2vf == NULL)
		return -EINVAL;

	if (pf2vf_info->size > 1024) {
		DRM_ERROR("invalid pf2vf message size\n");
		return -EINVAL;
	}

	switch (pf2vf_info->version) {
	case 1:
		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			adev->virt.fw_reserve.checksum_key, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.gim_feature =
			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
		break;
	case 2:
		/* TODO: missing key, need to add it later */
		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			0, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.vf2pf_update_interval_ms =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
		adev->virt.gim_feature =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
		adev->virt.reg_access =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;

		adev->virt.decode_max_dimension_pixels = 0;
		adev->virt.decode_max_frame_pixels = 0;
		adev->virt.encode_max_dimension_pixels = 0;
		adev->virt.encode_max_frame_pixels = 0;
		adev->virt.is_mm_bw_enabled = false;
		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
		}
		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
			adev->virt.is_mm_bw_enabled = true;

		adev->unique_id =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
		break;
	default:
		DRM_ERROR("invalid pf2vf version\n");
		return -EINVAL;
	}

	/* clamp an out-of-range update interval to the 2s default */
	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
		adev->virt.vf2pf_update_interval_ms = 2000;

	return 0;
}

static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return;

	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
			    adev->psp.asd_context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
			    adev->psp.ras_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
			    adev->psp.xgmi_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
}

static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return -EINVAL;

	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
	if (THIS_MODULE->version != NULL)
		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
	else
#endif
		strcpy(vf2pf_info->driver_version, "N/A");

	vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
	vf2pf_info->driver_cert = 0;
	vf2pf_info->os_info.all = 0;

	vf2pf_info->fb_usage =
		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
	vf2pf_info->fb_vis_usage =
		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

	amdgpu_virt_populate_vf2pf_ucode_info(adev);

	/* TODO: read dynamic info */
	vf2pf_info->gfx_usage = 0;
	vf2pf_info->compute_usage = 0;
	vf2pf_info->encode_usage = 0;
	vf2pf_info->decode_usage = 0;

	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
	vf2pf_info->checksum =
		amd_sriov_msg_checksum(
		vf2pf_info, vf2pf_info->header.size, 0, 0);

	return 0;
}

static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
	int ret;

	ret = amdgpu_virt_read_pf2vf_data(adev);
	if (ret)
		goto out;
	amdgpu_virt_write_vf2pf_data(adev);

out:
	schedule_delayed_work(&(adev->virt.vf2pf_work),
			      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}

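/*
 * The exchange loop is a self-rescheduling delayed work item (a sketch of
 * the lifecycle, inferred from the three functions around it):
 *
 *	amdgpu_virt_init_data_exchange()     - INIT_DELAYED_WORK + first schedule
 *	amdgpu_virt_update_vf2pf_work_item() - runs every
 *		vf2pf_update_interval_ms and re-arms itself
 *	amdgpu_virt_fini_data_exchange()     - cancel_delayed_work_sync() stops it
 *
 * Note the re-arm above converts the millisecond interval with
 * msecs_to_jiffies() to match the initial schedule in
 * amdgpu_virt_init_data_exchange().
 */
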
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
	if (adev->virt.vf2pf_update_interval_ms != 0) {
		DRM_INFO("clean up the vf2pf work item\n");
		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
		adev->virt.vf2pf_update_interval_ms = 0;
	}
}

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;
	adev->virt.vf2pf_update_interval_ms = 0;

	if (adev->mman.fw_vram_usage_va != NULL) {
		/* go through this logic in ip_init and reset to init the workqueue */
		amdgpu_virt_exchange_data(adev);

		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
		schedule_delayed_work(&(adev->virt.vf2pf_work),
				      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
	} else if (adev->bios != NULL) {
		/* go through this logic in the early init stage to get necessary
		 * flags, e.g. rlcg_acc related
		 */
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
	}
}

void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
	uint64_t bp_block_offset = 0;
	uint32_t bp_block_size = 0;
	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

	if (adev->mman.fw_vram_usage_va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
		adev->virt.fw_reserve.p_vf2pf =
			(struct amd_sriov_msg_vf2pf_info_header *)
			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
		amdgpu_virt_write_vf2pf_data(adev);

		/* bad page handling for version 2 */
		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
				((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
			bp_block_size = pf2vf_v2->bp_block_size;

			if (bp_block_size && !adev->virt.ras_init_done)
				amdgpu_virt_init_ras_err_handler_data(adev);

			if (adev->virt.ras_init_done)
				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
		}
	}
}

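/*
 * Worked example (illustrative): the host splits the 64-bit bad page block
 * offset into two 32-bit fields, recombined above as
 *
 *	bp_block_offset_low  = 0x00001000
 *	bp_block_offset_high = 0x00000002
 *	bp_block_offset      = 0x00001000 | (0x00000002ULL << 32)
 *			     = 0x0000000200001000
 *
 * The explicit masks are defensive; the casts alone already place each half
 * in the right 32 bits.
 */
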
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chips don't support SRIOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		/* passthrough mode is exclusive with SR-IOV mode */
		if (is_virtual_machine())
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}

	/* we have the ability to check now */
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_TONGA:
		case CHIP_FIJI:
			vi_set_virt_ops(adev);
			break;
		case CHIP_VEGA10:
			soc15_set_virt_ops(adev);
			/* send a dummy GPU_INIT_DATA request to host on vega10 */
			amdgpu_virt_request_init_data(adev);
			break;
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
			soc15_set_virt_ops(adev);
			break;
		case CHIP_NAVI10:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
			nv_set_virt_ops(adev);
			/* try to send a GPU_INIT_DATA request to the host */
			amdgpu_virt_request_init_data(adev);
			break;
		default: /* other chips don't support SRIOV */
			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
			break;
		}
	}
}

static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev);
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev);
}

int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_sriov_is_pp_one_vf(adev))
			mode = SRIOV_VF_MODE_ONE_VF;
		else
			mode = SRIOV_VF_MODE_MULTI_VF;
	} else {
		mode = SRIOV_VF_MODE_BARE_METAL;
	}

	return mode;
}

void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
	uint32_t i;

	if (!adev->virt.is_mm_bw_enabled)
		return;

	if (encode) {
		for (i = 0; i < encode_array_size; i++) {
			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
			if (encode[i].max_width > 0)
				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
			else
				encode[i].max_height = 0;
		}
	}

	if (decode) {
		for (i = 0; i < decode_array_size; i++) {
			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
			if (decode[i].max_width > 0)
				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
			else
				decode[i].max_height = 0;
		}
	}
}

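/*
 * Example of the height derivation above (illustrative numbers): if the
 * host reports encode_max_dimension_pixels = 4096 and
 * encode_max_frame_pixels = 4096 * 2176 = 8912896, then each codec entry is
 * capped at max_width = 4096 and max_height = 8912896 / 4096 = 2176.
 */
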
static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
						 u32 acc_flags, u32 hwip,
						 bool write, u32 *rlcg_flag)
{
	bool ret = false;

	switch (hwip) {
	case GC_HWIP:
		if (amdgpu_sriov_reg_indirect_gc(adev)) {
			*rlcg_flag =
				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
			ret = true;
		/* only in the new version are AMDGPU_REGS_NO_KIQ and
		 * AMDGPU_REGS_RLC enabled simultaneously
		 */
		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
			   !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
			ret = true;
		}
		break;
	case MMHUB_HWIP:
		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
		    (acc_flags & AMDGPU_REGS_RLC) && write) {
			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
			ret = true;
		}
		break;
	default:
		break;
	}

	return ret;
}

static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
	uint32_t timeout = 50000;
	uint32_t i, tmp;
	uint32_t ret = 0;
	static void *scratch_reg0;
	static void *scratch_reg1;
	static void *scratch_reg2;
	static void *scratch_reg3;
	static void *spare_int;

	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
		dev_err(adev->dev,
			"indirect registers access through rlcg is not available\n");
		return 0;
	}

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
	if (reg_access_ctrl->spare_int)
		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

	if (offset == reg_access_ctrl->grbm_cntl) {
		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
		writel(v, scratch_reg2);
		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else if (offset == reg_access_ctrl->grbm_idx) {
		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
		writel(v, scratch_reg3);
		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		/*
		 * SCRATCH_REG0		= read/write value
		 * SCRATCH_REG1[30:28]	= command
		 * SCRATCH_REG1[19:0]	= address in dword
		 * SCRATCH_REG1[26:24]	= error reporting
		 */
		writel(v, scratch_reg0);
		writel((offset | flag), scratch_reg1);
		if (reg_access_ctrl->spare_int)
			writel(1, spare_int);

		for (i = 0; i < timeout; i++) {
			tmp = readl(scratch_reg1);
			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
				break;
			udelay(10);
		}

		if (i >= timeout) {
			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
					dev_err(adev->dev,
						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
					dev_err(adev->dev,
						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
					dev_err(adev->dev,
						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
				} else {
					dev_err(adev->dev,
						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
				}
			} else {
				dev_err(adev->dev,
					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
			}
		}
	}

	ret = readl(scratch_reg0);
	return ret;
}

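/*
 * Illustrative encoding (based on the scratch register layout documented
 * above): a GC write of value v to dword-offset 0x1234 is issued as
 *
 *	writel(v, scratch_reg0);
 *	writel(0x1234 | AMDGPU_RLCG_GC_WRITE, scratch_reg1);
 *
 * where the flag supplies the command bits in SCRATCH_REG1[30:28] and the
 * low 20 bits carry the register address. The RLC firmware clears the
 * address bits once it has performed the access, which is what the polling
 * loop above waits for; for reads, the result is then fetched from
 * SCRATCH_REG0.
 */
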
void amdgpu_sriov_wreg(struct amdgpu_device *adev,
		       u32 offset, u32 value,
		       u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
		return;
	}

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		WREG32_NO_KIQ(offset, value);
	else
		WREG32(offset, value);
}

u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
		      u32 offset, u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		return RREG32_NO_KIQ(offset);
	else
		return RREG32(offset);
}

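/*
 * Usage sketch (an assumption, mirroring how IP blocks typically wrap these
 * helpers): SR-IOV-aware register macros route through
 * amdgpu_sriov_rreg/wreg so a VF transparently picks RLCG indirect access
 * when direct MMIO is restricted, e.g.
 *
 *	amdgpu_sriov_wreg(adev, reg_offset, value, AMDGPU_REGS_RLC, GC_HWIP);
 *	value = amdgpu_sriov_rreg(adev, reg_offset, 0, GC_HWIP);
 *
 * Callers in runtime mode (AMDGPU_SRIOV_CAPS_RUNTIME set) fall through to
 * the normal WREG32/RREG32 paths.
 */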