drm/sched: Add drm_sched_wqueue_* helpers
author    Matthew Brost <matthew.brost@intel.com>
Tue, 31 Oct 2023 03:24:35 +0000 (20:24 -0700)
committer Luben Tuikov <ltuikov89@gmail.com>
Wed, 1 Nov 2023 21:29:20 +0000 (17:29 -0400)
Add scheduler wqueue ready, stop, and start helpers to hide the
implementation details of the scheduler from the drivers.
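
A minimal sketch of the intended driver-side usage (hypothetical code, not
part of this patch; the function name is made up), assuming a driver that
embeds a struct drm_gpu_scheduler per ring:

	#include <drm/gpu_scheduler.h>

	static void my_driver_reset_ring(struct drm_gpu_scheduler *sched)
	{
		/* Skip schedulers that were never fully initialized. */
		if (!drm_sched_wqueue_ready(sched))
			return;

		/* Pause submission before resetting/preempting the ring. */
		drm_sched_wqueue_stop(sched);

		/* ... hardware reset / preemption work goes here ... */

		/* Resume submission once the hardware is usable again. */
		drm_sched_wqueue_start(sched);
	}

This mirrors the conversions below, where drivers previously open-coded
checks of sched->thread and called kthread_park()/kthread_unpark() on it.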

v2:
  - s/sched_submit/sched_wqueue/ (Luben)
  - Remove the extra blank line after the return statement (Luben)
  - update drm_sched_wqueue_ready comment (Luben)

Cc: Luben Tuikov <luben.tuikov@amd.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://lore.kernel.org/r/20231031032439.1558703-2-matthew.brost@intel.com
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/scheduler/sched_main.c
include/drm/gpu_scheduler.h

index 625db444df1cb60ddff16397f0714ef4238d91ad..10d56979fe3b96fd2522d1a1785500adb40d52e7 100644 (file)
@@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
 
-               if (!(ring && ring->sched.thread))
+               if (!(ring && drm_sched_wqueue_ready(&ring->sched)))
                        continue;
 
                /* stop scheduler and drain ring. */
index a4faea4fa0b592aed2a252f5521af514d34104e3..a4c0bb358db710dab0edb9e4f834f0eaa464d35f 100644 (file)
@@ -1659,9 +1659,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
 
-               if (!ring || !ring->sched.thread)
+               if (!ring || !drm_sched_wqueue_ready(&ring->sched))
                        continue;
-               kthread_park(ring->sched.thread);
+               drm_sched_wqueue_stop(&ring->sched);
        }
 
        seq_puts(m, "run ib test:\n");
@@ -1675,9 +1675,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
 
-               if (!ring || !ring->sched.thread)
+               if (!ring || !drm_sched_wqueue_ready(&ring->sched))
                        continue;
-               kthread_unpark(ring->sched.thread);
+               drm_sched_wqueue_start(&ring->sched);
        }
 
        up_write(&adev->reset_domain->sem);
@@ -1897,7 +1897,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 
        ring = adev->rings[val];
 
-       if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
+       if (!ring || !ring->funcs->preempt_ib ||
+           !drm_sched_wqueue_ready(&ring->sched))
                return -EINVAL;
 
        /* the last preemption failed */
@@ -1915,7 +1916,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
                goto pro_end;
 
        /* stop the scheduler */
-       kthread_park(ring->sched.thread);
+       drm_sched_wqueue_stop(&ring->sched);
 
        /* preempt the IB */
        r = amdgpu_ring_preempt_ib(ring);
@@ -1949,7 +1950,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 
 failure:
        /* restart the scheduler */
-       kthread_unpark(ring->sched.thread);
+       drm_sched_wqueue_start(&ring->sched);
 
        up_read(&adev->reset_domain->sem);
 
index 19b539cab7fecee26a91265a8725018e3cb812fd..a7f7afcb2bf0e523d8230e8300604da3e686edd0 100644 (file)
@@ -4601,7 +4601,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
 
-               if (!ring || !ring->sched.thread)
+               if (!ring || !drm_sched_wqueue_ready(&ring->sched))
                        continue;
 
                spin_lock(&ring->sched.job_list_lock);
@@ -4740,7 +4740,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
 
-               if (!ring || !ring->sched.thread)
+               if (!ring || !drm_sched_wqueue_ready(&ring->sched))
                        continue;
 
                /* Clear job fence from fence drv to avoid force_completion
@@ -5282,7 +5282,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = tmp_adev->rings[i];
 
-                       if (!ring || !ring->sched.thread)
+                       if (!ring || !drm_sched_wqueue_ready(&ring->sched))
                                continue;
 
                        drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@@ -5357,7 +5357,7 @@ skip_hw_reset:
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = tmp_adev->rings[i];
 
-                       if (!ring || !ring->sched.thread)
+                       if (!ring || !drm_sched_wqueue_ready(&ring->sched))
                                continue;
 
                        drm_sched_start(&ring->sched, true);
@@ -5683,7 +5683,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = adev->rings[i];
 
-                       if (!ring || !ring->sched.thread)
+                       if (!ring || !drm_sched_wqueue_ready(&ring->sched))
                                continue;
 
                        drm_sched_stop(&ring->sched, NULL);
@@ -5811,7 +5811,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
 
-               if (!ring || !ring->sched.thread)
+               if (!ring || !drm_sched_wqueue_ready(&ring->sched))
                        continue;
 
                drm_sched_start(&ring->sched, true);
index fa527935ffd4230a546000d5f382a912625e9f56..8fa9ce3746b6064118f3c6200a3ac5884d361136 100644 (file)
@@ -809,7 +809,8 @@ static void suspend_scheduler(struct msm_gpu *gpu)
         */
        for (i = 0; i < gpu->nr_rings; i++) {
                struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
-               kthread_park(sched->thread);
+
+               drm_sched_wqueue_stop(sched);
        }
 }
 
@@ -819,7 +820,8 @@ static void resume_scheduler(struct msm_gpu *gpu)
 
        for (i = 0; i < gpu->nr_rings; i++) {
                struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
-               kthread_unpark(sched->thread);
+
+               drm_sched_wqueue_start(sched);
        }
 }
 
index 99797a8c836ac72203b6963a4788ebdb0b2abcc7..54c1c5fe01ba9f21bc7b6a8d4054824b1d782b4f 100644 (file)
@@ -439,7 +439,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 {
        struct drm_sched_job *s_job, *tmp;
 
-       kthread_park(sched->thread);
+       drm_sched_wqueue_stop(sched);
 
        /*
         * Reinsert back the bad job here - now it's safe as
@@ -552,7 +552,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
                spin_unlock(&sched->job_list_lock);
        }
 
-       kthread_unpark(sched->thread);
+       drm_sched_wqueue_start(sched);
 }
 EXPORT_SYMBOL(drm_sched_start);
 
@@ -1252,3 +1252,38 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
        }
 }
 EXPORT_SYMBOL(drm_sched_increase_karma);
+
+/**
+ * drm_sched_wqueue_ready - Is the scheduler ready for submission
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if submission is ready
+ */
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
+{
+       return !!sched->thread;
+}
+EXPORT_SYMBOL(drm_sched_wqueue_ready);
+
+/**
+ * drm_sched_wqueue_stop - stop scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
+{
+       kthread_park(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_wqueue_stop);
+
+/**
+ * drm_sched_wqueue_start - start scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
+{
+       kthread_unpark(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_wqueue_start);
index 653e0eec9743ecfa6c96f3cc26292c45b61aa67d..7e622c18d3365e0b314555b904d08364773827ad 100644 (file)
@@ -553,6 +553,9 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
 
 void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);