From: Jack Xiao
Date: Thu, 25 Apr 2024 07:28:48 +0000 (+0800)
Subject: drm/amdgpu/gfx: enable mes to map legacy queue support
X-Git-Tag: io_uring-6.11-20240722~49^2~25^2~505
X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=f9d8c5c7855d8f3e4c3e678777d02a49046eafb0;p=linux-2.6-block.git

drm/amdgpu/gfx: enable mes to map legacy queue support

Enable MES to map the legacy (KIQ-managed) gfx and compute queues. When
MES is enabled, skip the KIQ map_queues path and map each legacy ring
through amdgpu_mes_map_legacy_queue() instead.

v2: kiq_set_resources is still required.

Signed-off-by: Jack Xiao
Reviewed-by: Hawking Zhang
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1d955652f3ba..f5a0d96cd29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -623,10 +623,14 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
 		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
 	}
 
-	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
-		 kiq_ring->queue);
 	amdgpu_device_flush_hdp(adev, NULL);
 
+	if (adev->enable_mes)
+		queue_mask = ~0ULL;
+
+	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
+		 kiq_ring->queue);
+
 	spin_lock(&kiq->ring_lock);
 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
 					adev->gfx.num_compute_rings +
@@ -637,14 +641,14 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
 		return r;
 	}
 
-	if (adev->enable_mes)
-		queue_mask = ~0ULL;
-
 	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		j = i + xcc_id * adev->gfx.num_compute_rings;
-		kiq->pmf->kiq_map_queues(kiq_ring,
-					 &adev->gfx.compute_ring[j]);
+
+	if (!adev->enable_mes) {
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			j = i + xcc_id * adev->gfx.num_compute_rings;
+			kiq->pmf->kiq_map_queues(kiq_ring,
+						 &adev->gfx.compute_ring[j]);
+		}
 	}
 
 	r = amdgpu_ring_test_helper(kiq_ring);
@@ -652,6 +656,20 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
 	if (r)
 		DRM_ERROR("KCQ enable failed\n");
 
+	if (adev->enable_mes) {
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			j = i + xcc_id * adev->gfx.num_compute_rings;
+			r = amdgpu_mes_map_legacy_queue(adev,
+							&adev->gfx.compute_ring[j]);
+			if (r) {
+				DRM_ERROR("failed to map compute queue\n");
+				return r;
+			}
+		}
+
+		return 0;
+	}
+
 	return r;
 }
 
@@ -666,6 +684,20 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
 
 	amdgpu_device_flush_hdp(adev, NULL);
 
+	if (adev->enable_mes) {
+		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+			j = i + xcc_id * adev->gfx.num_gfx_rings;
+			r = amdgpu_mes_map_legacy_queue(adev,
+							&adev->gfx.gfx_ring[j]);
+			if (r) {
+				DRM_ERROR("failed to map gfx queue\n");
+				return r;
+			}
+		}
+
+		return 0;
+	}
+
 	spin_lock(&kiq->ring_lock);
 	/* No need to map kcq on the slave */
 	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
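
A condensed, stand-alone sketch of the control flow this patch gives
amdgpu_gfx_enable_kcq(): kiq_set_resources() is issued on both paths, but
with adev->enable_mes set the full queue mask is handed to the scheduler
and each legacy compute ring is mapped through amdgpu_mes_map_legacy_queue()
rather than the KIQ map_queues packet. Everything below is a user-space
mock for illustration only; the mock_* types and functions are hypothetical
stand-ins, not the real amdgpu structures or callbacks.

/*
 * Simplified model of the queue-mapping decision added by this patch.
 * All mock_* names are stand-ins for illustration, not amdgpu APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_adev {
	bool enable_mes;        /* models adev->enable_mes */
	int num_compute_rings;  /* models adev->gfx.num_compute_rings */
};

/* Stub: models kiq->pmf->kiq_set_resources(), issued on both paths. */
static void mock_kiq_set_resources(uint64_t queue_mask)
{
	printf("kiq_set_resources(queue_mask=0x%llx)\n",
	       (unsigned long long)queue_mask);
}

/* Stub: models kiq->pmf->kiq_map_queues() on the legacy KIQ path. */
static void mock_kiq_map_queues(int ring)
{
	printf("kiq_map_queues(compute_ring[%d])\n", ring);
}

/* Stub: models amdgpu_mes_map_legacy_queue() on the MES path. */
static int mock_mes_map_legacy_queue(int ring)
{
	printf("mes_map_legacy_queue(compute_ring[%d])\n", ring);
	return 0;
}

/* Mirrors the flow of amdgpu_gfx_enable_kcq() after this patch. */
static int mock_enable_kcq(struct mock_adev *adev, uint64_t queue_mask)
{
	int i, r;

	/* With MES enabled, hand the whole queue space to the scheduler. */
	if (adev->enable_mes)
		queue_mask = ~0ULL;

	/* v2: kiq_set_resources is still required on both paths. */
	mock_kiq_set_resources(queue_mask);

	if (!adev->enable_mes) {
		/* Legacy path: KIQ maps each compute queue itself. */
		for (i = 0; i < adev->num_compute_rings; i++)
			mock_kiq_map_queues(i);
		return 0;
	}

	/* MES path: map each legacy compute queue through MES instead. */
	for (i = 0; i < adev->num_compute_rings; i++) {
		r = mock_mes_map_legacy_queue(i);
		if (r)
			return r;
	}

	return 0;
}

int main(void)
{
	struct mock_adev adev = { .enable_mes = true, .num_compute_rings = 4 };

	return mock_enable_kcq(&adev, 0xfULL);
}

Building and running the mock with enable_mes toggled shows which of the
two mapping paths is taken while the set_resources step stays common.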