drm/amdgpu/gfx: enable MES to map legacy queues
author Jack Xiao <Jack.Xiao@amd.com>
Thu, 25 Apr 2024 07:28:48 +0000 (15:28 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 30 Apr 2024 14:00:01 +0000 (10:00 -0400)
Enable MES to map legacy queues: when MES is enabled, set the KIQ
resource mask to cover all queues and map the legacy gfx and compute
rings through the MES instead of via KIQ MAP_QUEUES packets.

v2: kiq_set_resources is still required, so keep it on the MES path as well.
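In short: with adev->enable_mes set, kiq_set_resources is still issued
(with a full queue mask) and the legacy rings are then handed to the MES
via amdgpu_mes_map_legacy_queue() rather than mapped with KIQ MAP_QUEUES
packets. A minimal sketch of the resulting flow in amdgpu_gfx_enable_kcq(),
simplified from the diff below (locking, ring allocation and error paths
other than the MES loop elided):

  if (adev->enable_mes)
          queue_mask = ~0ULL;     /* MES owns scheduling: claim all queue bits */

  kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);

  if (!adev->enable_mes) {
          /* legacy path: map KCQs with KIQ MAP_QUEUES packets */
          for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                  j = i + xcc_id * adev->gfx.num_compute_rings;
                  kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[j]);
          }
  }

  r = amdgpu_ring_test_helper(kiq_ring);

  if (adev->enable_mes) {
          /* MES path: map each legacy compute ring through the MES */
          for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                  j = i + xcc_id * adev->gfx.num_compute_rings;
                  r = amdgpu_mes_map_legacy_queue(adev, &adev->gfx.compute_ring[j]);
                  if (r)
                          return r;
          }
          return 0;
  }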

Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c

index 1d955652f3ba6e00bc57b71d11fded470754cd1f..f5a0d96cd29c06ed0624bd163c04fb440a316a4d 100644
@@ -623,10 +623,14 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
                queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
        }
 
-       DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
-                                                       kiq_ring->queue);
        amdgpu_device_flush_hdp(adev, NULL);
 
+       if (adev->enable_mes)
+               queue_mask = ~0ULL;
+
+       DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
+                kiq_ring->queue);
+
        spin_lock(&kiq->ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
                                        adev->gfx.num_compute_rings +
@@ -637,14 +641,14 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
                return r;
        }
 
-       if (adev->enable_mes)
-               queue_mask = ~0ULL;
-
        kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
-       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               j = i + xcc_id * adev->gfx.num_compute_rings;
-               kiq->pmf->kiq_map_queues(kiq_ring,
-                                        &adev->gfx.compute_ring[j]);
+
+       if (!adev->enable_mes) {
+               for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                       j = i + xcc_id * adev->gfx.num_compute_rings;
+                       kiq->pmf->kiq_map_queues(kiq_ring,
+                                                &adev->gfx.compute_ring[j]);
+               }
        }
 
        r = amdgpu_ring_test_helper(kiq_ring);
@@ -652,6 +656,20 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
        if (r)
                DRM_ERROR("KCQ enable failed\n");
 
+       if (adev->enable_mes) {
+               for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                       j = i + xcc_id * adev->gfx.num_compute_rings;
+                       r = amdgpu_mes_map_legacy_queue(adev,
+                                              &adev->gfx.compute_ring[j]);
+                       if (r) {
+                               DRM_ERROR("failed to map compute queue\n");
+                               return r;
+                       }
+               }
+
+               return 0;
+       }
+
        return r;
 }
 
@@ -666,6 +684,20 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
 
        amdgpu_device_flush_hdp(adev, NULL);
 
+       if (adev->enable_mes) {
+               for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+                       j = i + xcc_id * adev->gfx.num_gfx_rings;
+                       r = amdgpu_mes_map_legacy_queue(adev,
+                                                       &adev->gfx.gfx_ring[j]);
+                       if (r) {
+                               DRM_ERROR("failed to map gfx queue\n");
+                               return r;
+                       }
+               }
+
+               return 0;
+       }
+
        spin_lock(&kiq->ring_lock);
        /* No need to map kcq on the slave */
        if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
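Note the ordering difference on the gfx side: in amdgpu_gfx_enable_kgq()
the MES branch added above runs right after the HDP flush and returns
early, so the kiq->ring_lock section and the master-XCC check are only
reached on the legacy KIQ path. Roughly (a sketch; mes_map_all_kgq() is
a hypothetical stand-in for the amdgpu_mes_map_legacy_queue() loop in
the hunk above):

  amdgpu_device_flush_hdp(adev, NULL);

  if (adev->enable_mes)
          return mes_map_all_kgq(adev, xcc_id);   /* hypothetical helper, not a real function */

  spin_lock(&kiq->ring_lock);
  /* legacy path continues: map KGQs via KIQ, skipping slave XCCs */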