drm/i915: Don't set queue_priority_hint if we don't kick the submission
author     Chris Wilson <chris@chris-wilson.co.uk>
           Mon, 21 Oct 2019 08:02:11 +0000 (09:02 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Mon, 21 Oct 2019 10:04:24 +0000 (11:04 +0100)
If we change the priority of the active context, then it has no impact
on the decision of whether to preempt the active context -- we don't
preempt the context with itself. In this situation, we elide the tasklet
rescheduling and should *not* mark up the queue_priority_hint, as the
stale hint may mask a later submission: we would decide not to kick the
tasklet because a higher priority submission appears to be pending
(spoiler alert, it was not).
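
To make the failure mode concrete, here is a minimal standalone sketch
of the masking (hypothetical names -- sketch_execlists,
old_reschedule_active, new_submission -- not the driver code itself):

#include <stdbool.h>
#include <stdio.h>

struct sketch_execlists {
        int queue_priority_hint;        /* highest prio we believe is queued */
        bool tasklet_scheduled;         /* stand-in for tasklet_hi_schedule() */
};

/*
 * Old behaviour: raise the hint even though the kick is elided because
 * the re-prioritised context is the one already running.
 */
static void old_reschedule_active(struct sketch_execlists *el, int prio)
{
        if (prio > el->queue_priority_hint)
                el->queue_priority_hint = prio;
        /* no tasklet kick -- we never preempt a context with itself */
}

/* A later submission consults the hint to decide whether to kick. */
static void new_submission(struct sketch_execlists *el, int prio)
{
        if (prio <= el->queue_priority_hint)
                return;         /* masked: we assume a kick is already pending */

        el->queue_priority_hint = prio;
        el->tasklet_scheduled = true;   /* would be tasklet_hi_schedule() */
}

int main(void)
{
        struct sketch_execlists el = { .queue_priority_hint = 0 };

        old_reschedule_active(&el, 4);  /* bump the running context to prio 4 */
        new_submission(&el, 2);         /* real new work that needed a kick */

        printf("tasklet scheduled: %s\n",
               el.tasklet_scheduled ? "yes" : "no");    /* prints "no" */
        return 0;
}

With the patch, the hint is only raised inside kick_submission() once we
know the new request is not the context already running, so the later
submission above would still see a low hint and schedule the tasklet.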

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191021080226.537-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_scheduler.c

index 0ca40f6bf08c7b3b1bccfc18c4d1805555e9ddbc..d2edb527dcb8517894efa5eaf11fd4517ac965a2 100644
@@ -189,22 +189,34 @@ static inline bool need_preempt(int prio, int active)
        return prio >= max(I915_PRIORITY_NORMAL, active);
 }
 
-static void kick_submission(struct intel_engine_cs *engine, int prio)
+static void kick_submission(struct intel_engine_cs *engine,
+                           const struct i915_request *rq,
+                           int prio)
 {
-       const struct i915_request *inflight =
-               execlists_active(&engine->execlists);
+       const struct i915_request *inflight;
+
+       /*
+        * We only need to kick the tasklet once for the high priority
+        * new context we add into the queue.
+        */
+       if (prio <= engine->execlists.queue_priority_hint)
+               return;
+
+       /* Nothing currently active? We're overdue for a submission! */
+       inflight = execlists_active(&engine->execlists);
+       if (!inflight)
+               return;
 
        /*
         * If we are already the currently executing context, don't
-        * bother evaluating if we should preempt ourselves, or if
-        * we expect nothing to change as a result of running the
-        * tasklet, i.e. we have not change the priority queue
-        * sufficiently to oust the running context.
+        * bother evaluating if we should preempt ourselves.
         */
-       if (!inflight || !need_preempt(prio, rq_prio(inflight)))
+       if (inflight->hw_context == rq->hw_context)
                return;
 
-       tasklet_hi_schedule(&engine->execlists.tasklet);
+       engine->execlists.queue_priority_hint = prio;
+       if (need_preempt(prio, rq_prio(inflight)))
+               tasklet_hi_schedule(&engine->execlists.tasklet);
 }
 
 static void __i915_schedule(struct i915_sched_node *node,
@@ -330,13 +342,8 @@ static void __i915_schedule(struct i915_sched_node *node,
                        list_move_tail(&node->link, cache.priolist);
                }
 
-               if (prio <= engine->execlists.queue_priority_hint)
-                       continue;
-
-               engine->execlists.queue_priority_hint = prio;
-
                /* Defer (tasklet) submission until after all of our updates. */
-               kick_submission(engine, prio);
+               kick_submission(engine, node_to_request(node), prio);
        }
 
        spin_unlock(&engine->active.lock);
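
For reference, applying the hunks above yields the following
kick_submission() (assembled verbatim from the context and '+' lines of
the diff):

static void kick_submission(struct intel_engine_cs *engine,
                            const struct i915_request *rq,
                            int prio)
{
        const struct i915_request *inflight;

        /*
         * We only need to kick the tasklet once for the high priority
         * new context we add into the queue.
         */
        if (prio <= engine->execlists.queue_priority_hint)
                return;

        /* Nothing currently active? We're overdue for a submission! */
        inflight = execlists_active(&engine->execlists);
        if (!inflight)
                return;

        /*
         * If we are already the currently executing context, don't
         * bother evaluating if we should preempt ourselves.
         */
        if (inflight->hw_context == rq->hw_context)
                return;

        engine->execlists.queue_priority_hint = prio;
        if (need_preempt(prio, rq_prio(inflight)))
                tasklet_hi_schedule(&engine->execlists.tasklet);
}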