block: add a struct io_comp_batch argument to fops->iopoll()
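This change threads a struct io_comp_batch through blk_mq_poll() down to the driver's ->poll() hook, so polling drivers can hand completed requests back in a batch instead of completing them one by one; callers that don't care simply pass NULL. For orientation, the type added elsewhere in this series (include/linux/blkdev.h) looks roughly like the sketch below; treat it as an approximation of the interface, not a verbatim copy:

	struct io_comp_batch {
		struct request *req_list;	/* completions, linked via rq->rq_next */
		bool need_ts;			/* a queued request wants a timestamp */
		void (*complete)(struct io_comp_batch *);	/* flushes the batch */
	};

	/* pollers stack-allocate an empty batch */
	#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }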
diff --git a/block/blk-mq.c b/block/blk-mq.c
index fa4de25c3bcbc22445efa716f6a8276339cd1f84..79c25b64e8b090f0f153404f12ed591923a6d9d1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -318,17 +318,23 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
        struct elevator_queue *e = q->elevator;
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct request *rq = tags->static_rqs[tag];
+       unsigned int rq_flags = 0;
 
        if (e) {
-               rq->rq_flags = RQF_ELV;
+               rq_flags = RQF_ELV;
                rq->tag = BLK_MQ_NO_TAG;
                rq->internal_tag = tag;
        } else {
-               rq->rq_flags = 0;
                rq->tag = tag;
                rq->internal_tag = BLK_MQ_NO_TAG;
        }
 
+       if (data->flags & BLK_MQ_REQ_PM)
+               rq_flags |= RQF_PM;
+       if (blk_queue_io_stat(q))
+               rq_flags |= RQF_IO_STAT;
+       rq->rq_flags = rq_flags;
+
        if (blk_mq_need_time_stamp(rq))
                rq->start_time_ns = ktime_get_ns();
        else
@@ -338,10 +344,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
        rq->mq_ctx = ctx;
        rq->mq_hctx = hctx;
        rq->cmd_flags = data->cmd_flags;
-       if (data->flags & BLK_MQ_REQ_PM)
-               rq->rq_flags |= RQF_PM;
-       if (blk_queue_io_stat(q))
-               rq->rq_flags |= RQF_IO_STAT;
        rq->rq_disk = NULL;
        rq->part = NULL;
 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
@@ -357,7 +359,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
        rq->end_io = NULL;
        rq->end_io_data = NULL;
 
-       data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
        blk_crypto_rq_set_defaults(rq);
        INIT_LIST_HEAD(&rq->queuelist);
        /* tag was already set */
@@ -381,7 +382,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
                }
        }
 
-       data->hctx->queued++;
        return rq;
 }
 
@@ -404,17 +404,11 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
                tag = tag_offset + i;
                tags &= ~(1UL << i);
                rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
-               rq->rq_next = *data->cached_rq;
-               *data->cached_rq = rq;
+               rq_list_add(data->cached_rq, rq);
        }
        data->nr_tags -= nr;
 
-       if (!data->cached_rq)
-               return NULL;
-
-       rq = *data->cached_rq;
-       *data->cached_rq = rq->rq_next;
-       return rq;
+       return rq_list_pop(data->cached_rq);
 }
 
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
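The hunk above (and the plug-handling hunks below) replace open-coded list surgery on the cached request list with the rq_list helpers added for this series. A minimal sketch of their semantics, assuming the list is singly linked through rq->rq_next as the removed lines show (the real helpers live in include/linux/blkdev.h; the _sketch suffix marks these as illustrative):

	/* push a request onto the head of the list */
	static inline void rq_list_add_sketch(struct request **listptr,
					      struct request *rq)
	{
		rq->rq_next = *listptr;
		*listptr = rq;
	}

	/* pop the head of the list, NULL-safe on an empty list */
	static inline struct request *rq_list_pop_sketch(struct request **listptr)
	{
		struct request *rq = NULL;

		if (listptr && *listptr) {
			rq = *listptr;
			*listptr = rq->rq_next;
		}
		return rq;
	}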
@@ -593,7 +587,6 @@ static void __blk_mq_free_request(struct request *rq)
 void blk_mq_free_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
-       struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
        if (rq->rq_flags & (RQF_ELVPRIV | RQF_ELV)) {
@@ -607,7 +600,6 @@ void blk_mq_free_request(struct request *rq)
                }
        }
 
-       ctx->rq_completed[rq_is_sync(rq)]++;
        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                __blk_mq_dec_active_requests(hctx);
 
@@ -624,11 +616,9 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
 void blk_mq_free_plug_rqs(struct blk_plug *plug)
 {
-       while (plug->cached_rq) {
-               struct request *rq;
+       struct request *rq;
 
-               rq = plug->cached_rq;
-               plug->cached_rq = rq->rq_next;
+       while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) {
                percpu_ref_get(&rq->q->q_usage_counter);
                blk_mq_free_request(rq);
        }
@@ -1302,14 +1292,6 @@ struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
        return data.rq;
 }
 
-static inline unsigned int queued_to_index(unsigned int queued)
-{
-       if (!queued)
-               return 0;
-
-       return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
-}
-
 static bool __blk_mq_get_driver_tag(struct request *rq)
 {
        struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
@@ -1633,8 +1615,6 @@ out:
        if (!list_empty(&zone_list))
                list_splice_tail_init(&zone_list, list);
 
-       hctx->dispatched[queued_to_index(queued)]++;
-
        /* If we didn't flush the entire list, we could have told the driver
         * there was more coming, but that turned out to be a lie.
         */
@@ -2430,8 +2410,7 @@ void blk_mq_submit_bio(struct bio *bio)
 
        plug = blk_mq_plug(q, bio);
        if (plug && plug->cached_rq) {
-               rq = plug->cached_rq;
-               plug->cached_rq = rq->rq_next;
+               rq = rq_list_pop(&plug->cached_rq);
                INIT_LIST_HEAD(&rq->queuelist);
        } else {
                struct blk_mq_alloc_data data = {
@@ -4195,20 +4174,15 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
 }
 
 static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
-               unsigned int flags)
+                              struct io_comp_batch *iob, unsigned int flags)
 {
        struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
        long state = get_current_state();
        int ret;
 
-       hctx->poll_considered++;
-
        do {
-               hctx->poll_invoked++;
-
-               ret = q->mq_ops->poll(hctx);
+               ret = q->mq_ops->poll(hctx, iob);
                if (ret > 0) {
-                       hctx->poll_success++;
                        __set_current_state(TASK_RUNNING);
                        return ret;
                }
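With the batch plumbed into ->poll(), a driver can defer completions rather than finishing each request inline. A hedged sketch of the driver-side pattern follows; the mydrv_* names are invented for illustration, and blk_mq_add_to_batch() comes from a companion patch in this series:

	static int mydrv_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
	{
		struct request *req;
		int found = 0;

		/* reap completed commands from the hardware queue (hypothetical) */
		while ((req = mydrv_reap_one(hctx)) != NULL) {
			found++;
			/* batch the completion if possible, else complete inline */
			if (!blk_mq_add_to_batch(req, iob, 0, mydrv_complete_batch))
				blk_mq_complete_request(req);
		}
		return found;	/* > 0 reports progress to blk_mq_poll_classic() */
	}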
@@ -4227,14 +4201,15 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
        return 0;
 }
 
-int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
+int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
+               unsigned int flags)
 {
        if (!(flags & BLK_POLL_NOSLEEP) &&
            q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
                if (blk_mq_poll_hybrid(q, cookie))
                        return 1;
        }
-       return blk_mq_poll_classic(q, cookie, flags);
+       return blk_mq_poll_classic(q, cookie, iob, flags);
 }
 
 unsigned int blk_mq_rq_cpu(struct request *rq)
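On the consuming side the pattern is: stack-allocate an empty batch, pass it down, then flush whatever the driver queued. A rough sketch, loosely mirroring the io_uring conversion in this series:

	DEFINE_IO_COMP_BATCH(iob);
	int ret;

	ret = blk_mq_poll(q, cookie, &iob, flags);
	if (iob.req_list)		/* the driver batched some completions */
		iob.complete(&iob);	/* run them all in one call */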