drm/amdgpu/userq: optimize enforce isolation and s/r
author Alex Deucher <alexander.deucher@amd.com>
Wed, 16 Apr 2025 20:59:08 +0000 (16:59 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 22 Apr 2025 12:51:45 +0000 (08:51 -0400)
If user queues are disabled for all IPs in the
case of suspend and resume, or for gfx/compute in
the case of enforce isolation, we can return early.

Reviewed-by: Prike Liang <Prike.Liang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
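
For context, every early return added below keys off amdgpu_userqueue_get_supported_ip_mask(). Its body is not part of this patch; the sketch below is one plausible shape for it, assuming it simply sets a bit for each hardware IP type that has user queue callbacks registered (the adev->userq_funcs[] array indexed by the AMDGPU_HW_IP_* values is an assumption here, not something this diff shows):

/*
 * Sketch only: build a bitmask of HW IP types that support user queues.
 * Assumes adev->userq_funcs[] is indexed by the AMDGPU_HW_IP_* values.
 */
u32 amdgpu_userqueue_get_supported_ip_mask(struct amdgpu_device *adev)
{
	u32 userq_ip_mask = 0;
	int i;

	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		if (adev->userq_funcs[i])
			userq_ip_mask |= (1 << i);
	}

	return userq_ip_mask;
}

With a mask of zero (or one with neither the GFX nor the COMPUTE bit set, in the enforce isolation paths), there are no queues for the loops below to touch, so the functions can return before taking adev->userq_mutex or cancelling any resume work.
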
drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c

index 59488acd89fa8e0187d17428b0fc445d01c7b4dd..17bf2d568ae2c4c1b5843a03fa42e313af3f0f25 100644 (file)
@@ -759,12 +759,16 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
 
 int amdgpu_userq_suspend(struct amdgpu_device *adev)
 {
+       u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
        const struct amdgpu_userq_funcs *userq_funcs;
        struct amdgpu_usermode_queue *queue;
        struct amdgpu_userq_mgr *uqm, *tmp;
        int queue_id;
        int ret = 0;
 
+       if (!ip_mask)
+               return 0;
+
        mutex_lock(&adev->userq_mutex);
        list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
                cancel_delayed_work_sync(&uqm->resume_work);
@@ -779,12 +783,16 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
 
 int amdgpu_userq_resume(struct amdgpu_device *adev)
 {
+       u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
        const struct amdgpu_userq_funcs *userq_funcs;
        struct amdgpu_usermode_queue *queue;
        struct amdgpu_userq_mgr *uqm, *tmp;
        int queue_id;
        int ret = 0;
 
+       if (!ip_mask)
+               return 0;
+
        mutex_lock(&adev->userq_mutex);
        list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
                idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
@@ -799,12 +807,17 @@ int amdgpu_userq_resume(struct amdgpu_device *adev)
 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
                                                  u32 idx)
 {
+       u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
        const struct amdgpu_userq_funcs *userq_funcs;
        struct amdgpu_usermode_queue *queue;
        struct amdgpu_userq_mgr *uqm, *tmp;
        int queue_id;
        int ret = 0;
 
+       /* only need to stop gfx/compute */
+       if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+               return 0;
+
        mutex_lock(&adev->userq_mutex);
        if (adev->userq_halt_for_enforce_isolation)
                dev_warn(adev->dev, "userq scheduling already stopped!\n");
@@ -827,12 +840,17 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
                                                   u32 idx)
 {
+       u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
        const struct amdgpu_userq_funcs *userq_funcs;
        struct amdgpu_usermode_queue *queue;
        struct amdgpu_userq_mgr *uqm, *tmp;
        int queue_id;
        int ret = 0;
 
+       /* only need to start gfx/compute */
+       if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+               return 0;
+
        mutex_lock(&adev->userq_mutex);
        if (!adev->userq_halt_for_enforce_isolation)
                dev_warn(adev->dev, "userq scheduling already started!\n");