blk-mq: pass a flags argument to blk_mq_request_bypass_insert
author	Christoph Hellwig <hch@lst.de>	Thu, 13 Apr 2023 06:40:55 +0000 (08:40 +0200)
committer	Jens Axboe <axboe@kernel.dk>	Thu, 13 Apr 2023 12:52:30 +0000 (06:52 -0600)
Replace the boolean at_head argument with the same flags that are already
passed to blk_mq_insert_request.
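
For reference, a minimal sketch of the blk_insert_t type and its only flag
so far, as introduced earlier in this series in block/blk-mq.h (the exact
hex value follows the series; the __bitwise/__force annotations let sparse
flag callers that still pass a plain integer or bool):

	/* insert flags shared by blk_mq_insert_request and friends */
	typedef unsigned int __bitwise blk_insert_t;
	#define BLK_MQ_INSERT_AT_HEAD	((__force blk_insert_t)0x01)

Callers that previously passed at_head = true now pass
BLK_MQ_INSERT_AT_HEAD, and at_head = false becomes 0.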

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-19-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-flush.c
block/blk-mq.c
block/blk-mq.h

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 015982bd2f7c8f4b0d2bcbe29b203ed78b8105b3..1d3af17619deb712919ee04ed4610555994ba9f3 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -428,7 +428,7 @@ void blk_insert_flush(struct request *rq)
         */
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-               blk_mq_request_bypass_insert(rq, false);
+               blk_mq_request_bypass_insert(rq, 0);
                blk_mq_run_hw_queue(hctx, false);
                return;
        }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ba64c4621e29d660e980d84efa3018cd28d305b0..ff74559d7da1fc1bebe7bbcb108788eda7a90d06 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1447,7 +1447,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
                if (rq->rq_flags & RQF_DONTPREP) {
                        rq->rq_flags &= ~RQF_SOFTBARRIER;
                        list_del_init(&rq->queuelist);
-                       blk_mq_request_bypass_insert(rq, false);
+                       blk_mq_request_bypass_insert(rq, 0);
                } else if (rq->rq_flags & RQF_SOFTBARRIER) {
                        rq->rq_flags &= ~RQF_SOFTBARRIER;
                        list_del_init(&rq->queuelist);
@@ -2457,17 +2457,17 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 /**
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
- * @at_head: true if the request should be inserted at the head of the list.
+ * @flags: BLK_MQ_INSERT_*
  *
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head)
+void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
 {
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
        spin_lock(&hctx->lock);
-       if (at_head)
+       if (flags & BLK_MQ_INSERT_AT_HEAD)
                list_add(&rq->queuelist, &hctx->dispatch);
        else
                list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -2526,7 +2526,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
                 * and it is added to the scheduler queue, there is no chance to
                 * dispatch it given we prioritize requests in hctx->dispatch.
                 */
-               blk_mq_request_bypass_insert(rq, flags & BLK_MQ_INSERT_AT_HEAD);
+               blk_mq_request_bypass_insert(rq, flags);
        } else if (rq->rq_flags & RQF_FLUSH_SEQ) {
                /*
                 * Firstly normal IO request is inserted to scheduler queue or
@@ -2549,7 +2549,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
                 * Simply queue flush rq to the front of hctx->dispatch so that
                 * intensive flush workloads can benefit in case of NCQ HW.
                 */
-               blk_mq_request_bypass_insert(rq, true);
+               blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
        } else if (q->elevator) {
                LIST_HEAD(list);
 
@@ -2670,7 +2670,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                break;
        case BLK_STS_RESOURCE:
        case BLK_STS_DEV_RESOURCE:
-               blk_mq_request_bypass_insert(rq, false);
+               blk_mq_request_bypass_insert(rq, 0);
                blk_mq_run_hw_queue(hctx, false);
                break;
        default:
@@ -2718,7 +2718,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
                        break;
                case BLK_STS_RESOURCE:
                case BLK_STS_DEV_RESOURCE:
-                       blk_mq_request_bypass_insert(rq, false);
+                       blk_mq_request_bypass_insert(rq, 0);
                        blk_mq_run_hw_queue(hctx, false);
                        goto out;
                default:
@@ -2837,7 +2837,7 @@ static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                        break;
                case BLK_STS_RESOURCE:
                case BLK_STS_DEV_RESOURCE:
-                       blk_mq_request_bypass_insert(rq, false);
+                       blk_mq_request_bypass_insert(rq, 0);
                        if (list_empty(list))
                                blk_mq_run_hw_queue(hctx, false);
                        goto out;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 273eee00524b9877e1d584b5be41ffd393026fbc..bb16c0a54411b06a9b77958a2dae45c436ace872 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -67,7 +67,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 /*
  * Internal helpers for request insertion into sw queues
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head);
+void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags);
 
 /*
  * CPU -> queue mappings