drm/sched: Qualify drm_sched_wakeup() by drm_sched_entity_is_ready()
author Luben Tuikov <ltuikov89@gmail.com>
Thu, 9 Nov 2023 23:53:26 +0000 (18:53 -0500)
committer Luben Tuikov <ltuikov89@gmail.com>
Fri, 10 Nov 2023 00:05:35 +0000 (19:05 -0500)
Don't "wake up" the GPU scheduler unless the entity is ready, as well as we
can queue to the scheduler, i.e. there is no point in waking up the scheduler
for the entity unless the entity is ready.

Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
Fixes: bc8d6a9df99038 ("drm/sched: Don't disturb the entity when in RR-mode scheduling")
Reviewed-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231110000123.72565-2-ltuikov89@gmail.com
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_main.c
include/drm/gpu_scheduler.h
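
For reference, drm_sched_entity_is_ready() (drivers/gpu/drm/scheduler/sched_entity.c)
roughly amounts to the check below. This is a simplified sketch for context, not
part of this patch: the entity must have a job queued and no unresolved dependency.

	bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
	{
		/* Nothing queued on the entity: no point waking the scheduler. */
		if (spsc_queue_peek(&entity->job_queue) == NULL)
			return false;

		/* Still waiting on a dependency fence. */
		if (READ_ONCE(entity->dependency))
			return false;

		return true;
	}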

index f1db63cc8198122cbc8c3bbebca292684f836459..4d42b1e4daa67fcacb382094a7c04af1dab69384 100644 (file)
@@ -370,7 +370,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
                container_of(cb, struct drm_sched_entity, cb);
 
        drm_sched_entity_clear_dep(f, cb);
-       drm_sched_wakeup(entity->rq->sched);
+       drm_sched_wakeup(entity->rq->sched, entity);
 }
 
 /**
@@ -602,7 +602,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
                if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
                        drm_sched_rq_update_fifo(entity, submit_ts);
 
-               drm_sched_wakeup(entity->rq->sched);
+               drm_sched_wakeup(entity->rq->sched, entity);
        }
 }
 EXPORT_SYMBOL(drm_sched_entity_push_job);
index cd0dc3f81d05f0bbe5ec870d4554059a47474e41..8f5e466bd582398201d1b9f38f9da811a288913e 100644 (file)
@@ -925,10 +925,12 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
  *
  * Wake up the scheduler if we can queue jobs.
  */
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
+                     struct drm_sched_entity *entity)
 {
-       if (drm_sched_can_queue(sched))
-               drm_sched_run_job_queue(sched);
+       if (drm_sched_entity_is_ready(entity))
+               if (drm_sched_can_queue(sched))
+                       drm_sched_run_job_queue(sched);
 }
 
 /**
index 754fd2217334e59b59cdde59223d8b8255d8d8e2..09916c84703f59a9a136a9fce897716378ef69a5 100644 (file)
@@ -559,7 +559,7 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
 
 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
 void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity);
 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
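
Note: the nested ifs added to drm_sched_wakeup() above are logically equivalent
to a single combined condition. The sketch below is illustrative only and is not
part of the patch:

	void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
			      struct drm_sched_entity *entity)
	{
		/* Only kick the run-job worker when the entity has work it can
		 * actually run and the scheduler can accept more jobs.
		 */
		if (drm_sched_entity_is_ready(entity) && drm_sched_can_queue(sched))
			drm_sched_run_job_queue(sched);
	}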