drm/i915: Rename execlists->queue_priority to queue_priority_hint
author Chris Wilson <chris@chris-wilson.co.uk>
Tue, 29 Jan 2019 18:54:51 +0000 (18:54 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
Tue, 29 Jan 2019 20:00:03 +0000 (20:00 +0000)
After noticing that we trigger preemption events for currently executing
requests, as well as requests that complete before the preemption and
attempting to suppress those preemption events, it is wise to not
consider the queue_priority to be authoritative. As we only track the
maximum priority seen between dequeue passes, if the maximum priority
request is no longer available for dequeuing (it completed or is even
executing on another engine), we have no knowledge of the previous
queue_priority as it would require us to keep a full history of enqueued
requests -- but we already have that history in the priolists!

Rename the queue_priority to queue_priority_hint so that we do not
confuse it as being exactly the maximum priority in the queue, but merely
an indication that we have seen a new maximum priority value and as such
we should check whether it should preempt the currently running request.

v2: s/preempt_priority_hint/queue_priority_hint/ as preempt implies it
being only used for the singular task of preemption and not the wider
question of waking up due to a change in the queue.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190129185452.20989-3-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_scheduler.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_guc_submission.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index 340faea6c08a21fd8bebdc7c715d29a7ca53c8f5..4eeba588b996618a240a7ae9a9800f0176a2cd8c 100644 (file)
@@ -127,8 +127,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
        return rb_entry(rb, struct i915_priolist, node);
 }
 
-static void assert_priolists(struct intel_engine_execlists * const execlists,
-                            long queue_priority)
+static void assert_priolists(struct intel_engine_execlists * const execlists)
 {
        struct rb_node *rb;
        long last_prio, i;
@@ -139,7 +138,7 @@ static void assert_priolists(struct intel_engine_execlists * const execlists,
        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));
 
-       last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+       last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);
 
@@ -166,7 +165,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
        int idx, i;
 
        lockdep_assert_held(&engine->timeline.lock);
-       assert_priolists(execlists, INT_MAX);
+       assert_priolists(execlists);
 
        /* buckets sorted from highest [in slot 0] to lowest priority */
        idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
@@ -353,7 +352,7 @@ static void __i915_schedule(struct i915_request *rq,
                                continue;
                }
 
-               if (prio <= engine->execlists.queue_priority)
+               if (prio <= engine->execlists.queue_priority_hint)
                        continue;
 
                /*
@@ -366,7 +365,7 @@ static void __i915_schedule(struct i915_request *rq,
                        continue;
 
                /* Defer (tasklet) submission until after all of our updates. */
-               engine->execlists.queue_priority = prio;
+               engine->execlists.queue_priority_hint = prio;
                tasklet_hi_schedule(&engine->execlists.tasklet);
        }
 
index 8dca76f6315d958f654c24d85aca411ef9545fe7..0a610c9691fd29d3bb2c210f6ce08c37ca640b97 100644 (file)
@@ -480,7 +480,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
        GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
        GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
 
-       execlists->queue_priority = INT_MIN;
+       execlists->queue_priority_hint = INT_MIN;
        execlists->queue = RB_ROOT_CACHED;
 }
 
@@ -1178,7 +1178,7 @@ void intel_engines_park(struct drm_i915_private *i915)
                }
 
                /* Must be reset upon idling, or we may miss the busy wakeup. */
-               GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);
+               GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
 
                if (engine->park)
                        engine->park(engine);
index 4295ade0d6138781ca6abe4be5a85154d53ec73a..f12ecc8dec6bb29c7585b1ddc787f09b1da6aaa9 100644 (file)
@@ -737,7 +737,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
                if (intel_engine_has_preemption(engine)) {
                        struct guc_preempt_work *preempt_work =
                                &engine->i915->guc.preempt_work[engine->id];
-                       int prio = execlists->queue_priority;
+                       int prio = execlists->queue_priority_hint;
 
                        if (__execlists_need_preempt(prio, port_prio(port))) {
                                execlists_set_active(execlists,
@@ -783,7 +783,8 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
                        kmem_cache_free(engine->i915->priorities, p);
        }
 done:
-       execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
+       execlists->queue_priority_hint =
+               rb ? to_priolist(rb)->priority : INT_MIN;
        if (submit)
                port_assign(port, last);
        if (last)
index 5db16dd8e84499fcb5cdf0dc38692a12e86b39aa..a1034f7069aa07e747b8d1a3576be50b66f8d763 100644 (file)
@@ -591,7 +591,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
                        return;
 
-               if (need_preempt(engine, last, execlists->queue_priority)) {
+               if (need_preempt(engine, last, execlists->queue_priority_hint)) {
                        inject_preempt_context(engine);
                        return;
                }
@@ -699,20 +699,20 @@ done:
        /*
         * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
         *
-        * We choose queue_priority such that if we add a request of greater
+        * We choose the priority hint such that if we add a request of greater
         * priority than this, we kick the submission tasklet to decide on
         * the right order of submitting the requests to hardware. We must
         * also be prepared to reorder requests as they are in-flight on the
-        * HW. We derive the queue_priority then as the first "hole" in
+        * HW. We derive the priority hint then as the first "hole" in
         * the HW submission ports and if there are no available slots,
         * the priority of the lowest executing request, i.e. last.
         *
         * When we do receive a higher priority request ready to run from the
-        * user, see queue_request(), the queue_priority is bumped to that
+        * user, see queue_request(), the priority hint is bumped to that
         * request triggering preemption on the next dequeue (or subsequent
         * interrupt for secondary ports).
         */
-       execlists->queue_priority =
+       execlists->queue_priority_hint =
                port != execlists->port ? rq_prio(last) : INT_MIN;
 
        if (submit) {
@@ -861,7 +861,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
-       execlists->queue_priority = INT_MIN;
+       execlists->queue_priority_hint = INT_MIN;
        execlists->queue = RB_ROOT_CACHED;
        GEM_BUG_ON(port_isset(execlists->port));
 
@@ -1092,8 +1092,8 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
 
 static void submit_queue(struct intel_engine_cs *engine, int prio)
 {
-       if (prio > engine->execlists.queue_priority) {
-               engine->execlists.queue_priority = prio;
+       if (prio > engine->execlists.queue_priority_hint) {
+               engine->execlists.queue_priority_hint = prio;
                __submit_queue_imm(engine);
        }
 }
@@ -2777,7 +2777,9 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 
        last = NULL;
        count = 0;
-       drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
+       if (execlists->queue_priority_hint != INT_MIN)
+               drm_printf(m, "\t\tQueue priority hint: %d\n",
+                          execlists->queue_priority_hint);
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
                int i;
index 1f30ffb84936f8ae34121ded689a4e5fee8b8441..ef024c154a1b1d0830602c71b72427bcd8bf3009 100644 (file)
@@ -293,14 +293,18 @@ struct intel_engine_execlists {
        unsigned int port_mask;
 
        /**
-        * @queue_priority: Highest pending priority.
+        * @queue_priority_hint: Highest pending priority.
         *
         * When we add requests into the queue, or adjust the priority of
         * executing requests, we compute the maximum priority of those
         * pending requests. We can then use this value to determine if
         * we need to preempt the executing requests to service the queue.
+        * However, since we may have recorded the priority of an inflight
+        * request we wanted to preempt but since completed, at the time of
+        * dequeuing the priority hint may no longer match the highest
+        * available request priority.
         */
-       int queue_priority;
+       int queue_priority_hint;
 
        /**
         * @queue: queue of requests, in priority lists