block, bfq: always inject I/O of queues blocked by wakers
Author:     Paolo Valente <paolo.valente@linaro.org>
AuthorDate: Thu, 4 Mar 2021 17:46:22 +0000 (18:46 +0100)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Thu, 25 Mar 2021 16:50:07 +0000 (10:50 -0600)
Suppose that I/O dispatch is plugged, to wait for new I/O for the
in-service bfq_queue, say bfqq.  Suppose then that there is a further
bfq_queue woken by bfqq, and that this woken queue has pending I/O. A
woken queue does not steal bandwidth from bfqq, because it soon runs
out of I/O if bfqq is not served. So there is virtually no risk of
bandwidth loss for bfqq if this woken queue has its I/O dispatched
while bfqq is waiting for new I/O. On the contrary, this extra
injection boosts throughput. This commit performs this extra
injection.

Tested-by: Jan Kara <jack@suse.cz>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Link: https://lore.kernel.org/r/20210304174627.161-2-paolo.valente@linaro.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
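
For context, the patch builds on the waker/woken bookkeeping already kept in
struct bfq_queue. Below is a simplified sketch of the fields the hunks rely
on; the real struct (in block/bfq-iosched.h) has many more members, and the
comments here are illustrative:

struct bfq_queue {
	/* ... */
	struct request *next_rq;           /* next request to dispatch */
	struct bfq_queue *waker_bfqq;      /* queue detected as waker of this queue */
	struct hlist_head woken_list;      /* queues for which this queue is a waker */
	struct hlist_node woken_list_node; /* link into waker_bfqq->woken_list */
	/* ... */
};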
block/bfq-iosched.c
block/bfq-wf2q.c

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 95586137194eedefa5955dd89fef1775a690f237..eb249775029e57b631241b21f942dbe4e741d8da 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4491,9 +4491,15 @@ check_queue:
                        bfq_bfqq_busy(bfqq->bic->bfqq[0]) &&
                        bfqq->bic->bfqq[0]->next_rq ?
                        bfqq->bic->bfqq[0] : NULL;
+               struct bfq_queue *blocked_bfqq =
+                       !hlist_empty(&bfqq->woken_list) ?
+                       container_of(bfqq->woken_list.first,
+                                    struct bfq_queue,
+                                    woken_list_node)
+                       : NULL;
 
                /*
-                * The next three mutually-exclusive ifs decide
+                * The next four mutually-exclusive ifs decide
                 * whether to try injection, and choose the queue to
                 * pick an I/O request from.
                 *
@@ -4526,7 +4532,15 @@ check_queue:
                 * next bfqq's I/O is brought forward dramatically,
                 * for it is not blocked for milliseconds.
                 *
-                * The third if checks whether bfqq is a queue for
+                * The third if checks whether there is a queue woken
+                * by bfqq that currently has pending I/O. Such a
+                * woken queue does not steal bandwidth from bfqq,
+                * because it soon runs out of I/O if bfqq is not
+                * served. So there is virtually no risk of bandwidth
+                * loss for bfqq if this woken queue has its I/O
+                * dispatched while bfqq is waiting for new I/O.
+                *
+                * The fourth if checks whether bfqq is a queue for
                 * which it is better to avoid injection. It is so if
                 * bfqq delivers more throughput when served without
                 * any further I/O from other queues in the middle, or
@@ -4546,11 +4560,11 @@ check_queue:
                 * bfq_update_has_short_ttime(), it is rather likely
                 * that, if I/O is being plugged for bfqq and the
                 * waker queue has pending I/O requests that are
-                * blocking bfqq's I/O, then the third alternative
+                * blocking bfqq's I/O, then the fourth alternative
                 * above lets the waker queue get served before the
                 * I/O-plugging timeout fires. So one may deem the
                 * second alternative superfluous. It is not, because
-                * the third alternative may be way less effective in
+                * the fourth alternative may be way less effective in
                 * case of a synchronization. For two main
                 * reasons. First, throughput may be low because the
                 * inject limit may be too low to guarantee the same
@@ -4559,7 +4573,7 @@ check_queue:
                 * guarantees (the second alternative unconditionally
                 * injects a pending I/O request of the waker queue
                 * for each bfq_dispatch_request()). Second, with the
-                * third alternative, the duration of the plugging,
+                * fourth alternative, the duration of the plugging,
                 * i.e., the time before bfqq finally receives new I/O,
                 * may not be minimized, because the waker queue may
                 * happen to be served only after other queues.
@@ -4577,6 +4591,14 @@ check_queue:
                           bfq_bfqq_budget_left(bfqq->waker_bfqq)
                        )
                        bfqq = bfqq->waker_bfqq;
+               else if (blocked_bfqq &&
+                          bfq_bfqq_busy(blocked_bfqq) &&
+                          blocked_bfqq->next_rq &&
+                          bfq_serv_to_charge(blocked_bfqq->next_rq,
+                                             blocked_bfqq) <=
+                          bfq_bfqq_budget_left(blocked_bfqq)
+                       )
+                       bfqq = blocked_bfqq;
                else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
                         (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
                          !bfq_bfqq_has_short_ttime(bfqq)))
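
The new blocked_bfqq probe only ever inspects the first node of
bfqq->woken_list and recovers the enclosing queue with container_of(). A
self-contained userspace approximation of that recovery step follows; the
minimal hlist types and the bfq_queue_sketch struct are illustrative
stand-ins, not the kernel's definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Minimal stand-ins for the kernel's hlist types. */
struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

struct bfq_queue_sketch {
	int id;                            /* illustrative payload */
	struct hlist_head woken_list;      /* queues woken by this queue */
	struct hlist_node woken_list_node; /* link into the waker's list */
};

int main(void)
{
	struct bfq_queue_sketch waker = { .id = 1 }, woken = { .id = 2 };

	/* Hang 'woken' at the head of the waker's woken_list. */
	waker.woken_list.first = &woken.woken_list_node;

	/* The recovery step the hunk performs: list head -> first node ->
	 * enclosing queue, or NULL when the list is empty. */
	struct bfq_queue_sketch *blocked_bfqq =
		waker.woken_list.first ?
		container_of(waker.woken_list.first,
			     struct bfq_queue_sketch, woken_list_node) :
		NULL;

	printf("first woken queue: %d\n", blocked_bfqq ? blocked_bfqq->id : -1);
	return 0;
}

Note that the hunk then gates the injection exactly as the waker-queue
alternative above it: the candidate must be busy, have a next_rq, and the
charge for that request must fit within its remaining budget.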
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 070e34a7feb1843c43849406212fb37b1aae5bf1..7a462df71f680f8ec4c67b5d4631664b1a165ca2 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1706,4 +1706,12 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 
        if (bfqq->wr_coeff > 1)
                bfqd->wr_busy_queues++;
+
+       /* Move bfqq to the head of the woken list of its waker */
+       if (!hlist_unhashed(&bfqq->woken_list_node) &&
+           &bfqq->woken_list_node != bfqq->waker_bfqq->woken_list.first) {
+               hlist_del_init(&bfqq->woken_list_node);
+               hlist_add_head(&bfqq->woken_list_node,
+                              &bfqq->waker_bfqq->woken_list);
+       }
 }
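
The bfq-wf2q.c hunk keeps the dispatch-side probe cheap: since that probe
looks only at woken_list.first, a woken queue that just became busy is
rotated to the head of its waker's list. Below is a userspace sketch of that
move-to-front step, with simplified re-implementations of the kernel hlist
helpers it calls (kernel names, illustrative bodies):

#include <stdbool.h>
#include <stddef.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

/* An unhashed node is one not currently on any list. */
static bool hlist_unhashed(const struct hlist_node *n)
{
	return !n->pprev;
}

/* Unlink a node and mark it unhashed again. */
static void hlist_del_init(struct hlist_node *n)
{
	if (hlist_unhashed(n))
		return;
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	n->next = NULL;
	n->pprev = NULL;
}

/* Link a node at the head of a list. */
static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* The pattern from bfq_add_bfqq_busy(): if the node is on its waker's
 * list but not already first, move it to the head so the dispatch-side
 * probe finds a queue that actually has pending I/O. */
static void move_to_waker_head(struct hlist_node *node, struct hlist_head *head)
{
	if (!hlist_unhashed(node) && node != head->first) {
		hlist_del_init(node);
		hlist_add_head(node, head);
	}
}

int main(void)
{
	struct hlist_head head = { 0 };
	struct hlist_node a = { 0 }, b = { 0 };

	hlist_add_head(&a, &head);     /* list: a */
	hlist_add_head(&b, &head);     /* list: b, a */
	move_to_waker_head(&a, &head); /* list: a, b */
	return head.first == &a ? 0 : 1;
}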