drm/v3d: Add job to pending list if the reset was skipped
author	Maíra Canal <mcanal@igalia.com>
Wed, 30 Apr 2025 20:51:52 +0000 (17:51 -0300)
committer	Maíra Canal <mcanal@igalia.com>
Fri, 2 May 2025 19:25:14 +0000 (16:25 -0300)
When a CL/CSD job times out, we check if the GPU has made any progress
since the last timeout. If so, instead of resetting the hardware, we skip
the reset and let the timer get rearmed. This gives long-running jobs a
chance to complete.

However, when `timedout_job()` is called, the job in question is removed
from the pending list, which means it won't be automatically freed through
`free_job()`. Consequently, when we skip the reset and keep the job
running, the job won't be freed when it finally completes.
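
For context, the scheduler's timeout path looks roughly like this (a
simplified sketch of drm_sched_job_timedout() from
drivers/gpu/drm/scheduler/sched_main.c; the exact code differs across
kernel versions):

	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);
	if (job) {
		/* The job is unlinked from pending_list here; unless the
		 * driver puts it back, free_job() will never see it again.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = sched->ops->timedout_job(job);
	} else {
		spin_unlock(&sched->job_list_lock);
	}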

This situation leads to a memory leak, as exposed in [1] and [2].

Similarly to commit 704d3d60fec4 ("drm/etnaviv: don't block scheduler when
GPU is still active"), this patch ensures the job is put back on the
pending list when extending the timeout.

Cc: stable@vger.kernel.org # v6.0+
Reported-by: Daivik Bhatia <dtgs1208@gmail.com>
Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/12227 [1]
Closes: https://github.com/raspberrypi/linux/issues/6817 [2]
Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
Acked-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Link: https://lore.kernel.org/r/20250430210643.57924-1-mcanal@igalia.com
Signed-off-by: Maíra Canal <mcanal@igalia.com>
drivers/gpu/drm/v3d/v3d_sched.c

index 4a7701a33cf8d76c8f527e496966c6689673012f..eb35482f6fb5770819c9fc8632847e0f19c808cc 100644
@@ -744,11 +744,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
        return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
-/* If the current address or return address have changed, then the GPU
- * has probably made progress and we should delay the reset.  This
- * could fail if the GPU got in an infinite loop in the CL, but that
- * is pretty unlikely outside of an i-g-t testcase.
- */
+static void
+v3d_sched_skip_reset(struct drm_sched_job *sched_job)
+{
+       struct drm_gpu_scheduler *sched = sched_job->sched;
+
+       spin_lock(&sched->job_list_lock);
+       list_add(&sched_job->list, &sched->pending_list);
+       spin_unlock(&sched->job_list_lock);
+}
+
 static enum drm_gpu_sched_stat
 v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
                    u32 *timedout_ctca, u32 *timedout_ctra)
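
Note that v3d_sched_skip_reset() uses list_add() rather than
list_add_tail(), so the job goes back at the head of pending_list, the
same position drm_sched_job_timedout() removed it from, preserving the
order of the remaining pending jobs for the free path.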
@@ -758,9 +763,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
        u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
        u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
 
+       /* If the current address or return address have changed, then the GPU
+        * has probably made progress and we should delay the reset. This
+        * could fail if the GPU got in an infinite loop in the CL, but that
+        * is pretty unlikely outside of an i-g-t testcase.
+        */
        if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
                *timedout_ctca = ctca;
                *timedout_ctra = ctra;
+
+               v3d_sched_skip_reset(sched_job);
                return DRM_GPU_SCHED_STAT_NOMINAL;
        }
 
@@ -800,11 +812,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
        struct v3d_dev *v3d = job->base.v3d;
        u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
 
-       /* If we've made progress, skip reset and let the timer get
-        * rearmed.
+       /* If we've made progress, skip reset, add the job to the pending
+        * list, and let the timer get rearmed.
         */
        if (job->timedout_batches != batches) {
                job->timedout_batches = batches;
+
+               v3d_sched_skip_reset(sched_job);
                return DRM_GPU_SCHED_STAT_NOMINAL;
        }
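
With the job back on pending_list, the scheduler's normal reaping path can
free it once its fence finally signals. That path looks roughly like this
(a simplified sketch of drm_sched_get_finished_job(), named
drm_sched_get_cleanup_job() in older kernels, from
drivers/gpu/drm/scheduler/sched_main.c):

	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);
	if (job && dma_fence_is_signaled(&job->s_fence->finished))
		/* Only jobs still on pending_list are handed to free_job(). */
		list_del_init(&job->list);
	else
		job = NULL;
	spin_unlock(&sched->job_list_lock);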