struct elevator_queue *e = q->elevator;
struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
struct request *rq = tags->static_rqs[tag];
+ unsigned int rq_flags = 0;
if (e) {
- rq->rq_flags = RQF_ELV;
+ rq_flags = RQF_ELV;
rq->tag = BLK_MQ_NO_TAG;
rq->internal_tag = tag;
} else {
- rq->rq_flags = 0;
rq->tag = tag;
rq->internal_tag = BLK_MQ_NO_TAG;
}
+ if (data->flags & BLK_MQ_REQ_PM)
+ rq_flags |= RQF_PM;
+ if (blk_queue_io_stat(q))
+ rq_flags |= RQF_IO_STAT;
+ rq->rq_flags = rq_flags;
+
if (blk_mq_need_time_stamp(rq))
rq->start_time_ns = ktime_get_ns();
else
rq->start_time_ns = 0;
rq->mq_ctx = ctx;
rq->mq_hctx = hctx;
rq->cmd_flags = data->cmd_flags;
- if (data->flags & BLK_MQ_REQ_PM)
- rq->rq_flags |= RQF_PM;
- if (blk_queue_io_stat(q))
- rq->rq_flags |= RQF_IO_STAT;
rq->rq_disk = NULL;
rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
rq->alloc_time_ns = alloc_time_ns;
#endif
rq->end_io = NULL;
rq->end_io_data = NULL;
- data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
blk_crypto_rq_set_defaults(rq);
INIT_LIST_HEAD(&rq->queuelist);
/* tag was already set */
}
}
- data->hctx->queued++;
return rq;
}
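/*
 * For reference, the rq_flags handling above, once the hunk is applied,
 * reduces to roughly the following: the flags are accumulated in a local
 * variable and stored to rq->rq_flags once, instead of being built up with
 * several read-modify-write updates of the request field.
 *
 *	unsigned int rq_flags = 0;
 *
 *	if (e)
 *		rq_flags = RQF_ELV;
 *	if (data->flags & BLK_MQ_REQ_PM)
 *		rq_flags |= RQF_PM;
 *	if (blk_queue_io_stat(q))
 *		rq_flags |= RQF_IO_STAT;
 *	rq->rq_flags = rq_flags;
 */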
tag = tag_offset + i;
tags &= ~(1UL << i);
rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
- rq->rq_next = *data->cached_rq;
- *data->cached_rq = rq;
+ rq_list_add(data->cached_rq, rq);
}
data->nr_tags -= nr;
- if (!data->cached_rq)
- return NULL;
-
- rq = *data->cached_rq;
- *data->cached_rq = rq->rq_next;
- return rq;
+ return rq_list_pop(data->cached_rq);
}
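/*
 * For reference, a minimal sketch of the rq_list helpers used above, assuming
 * the same singly linked rq_next chain that the removed open-coded lines
 * manipulated; see include/linux/blkdev.h for the authoritative definitions.
 */
#define rq_list_add(listptr, rq)	do {		\
	(rq)->rq_next = *(listptr);			\
	*(listptr) = rq;				\
} while (0)

#define rq_list_pop(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr)) {			\
		__req = *(listptr);			\
		*(listptr) = __req->rq_next;		\
	}						\
	__req;						\
})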
static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
void blk_mq_free_request(struct request *rq)
{
struct request_queue *q = rq->q;
- struct blk_mq_ctx *ctx = rq->mq_ctx;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
if (rq->rq_flags & (RQF_ELVPRIV | RQF_ELV)) {
}
}
- ctx->rq_completed[rq_is_sync(rq)]++;
if (rq->rq_flags & RQF_MQ_INFLIGHT)
__blk_mq_dec_active_requests(hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
- while (plug->cached_rq) {
- struct request *rq;
+ struct request *rq;
- rq = plug->cached_rq;
- plug->cached_rq = rq->rq_next;
+ while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) {
percpu_ref_get(&rq->q->q_usage_counter);
blk_mq_free_request(rq);
}
return data.rq;
}
-static inline unsigned int queued_to_index(unsigned int queued)
-{
- if (!queued)
- return 0;
-
- return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
-}
-
static bool __blk_mq_get_driver_tag(struct request *rq)
{
struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
if (!list_empty(&zone_list))
list_splice_tail_init(&zone_list, list);
- hctx->dispatched[queued_to_index(queued)]++;
-
/*
 * If we didn't flush the entire list, we could have told the driver
 * there was more coming, but that turned out to be a lie.
 */
plug = blk_mq_plug(q, bio);
if (plug && plug->cached_rq) {
- rq = plug->cached_rq;
- plug->cached_rq = rq->rq_next;
+ rq = rq_list_pop(&plug->cached_rq);
INIT_LIST_HEAD(&rq->queuelist);
} else {
struct blk_mq_alloc_data data = {
}
static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
- unsigned int flags)
+ struct io_comp_batch *iob, unsigned int flags)
{
struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
long state = get_current_state();
int ret;
- hctx->poll_considered++;
-
do {
- hctx->poll_invoked++;
-
- ret = q->mq_ops->poll(hctx);
+ ret = q->mq_ops->poll(hctx, iob);
if (ret > 0) {
- hctx->poll_success++;
__set_current_state(TASK_RUNNING);
return ret;
}
return 0;
}
-int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
+int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
+ unsigned int flags)
{
if (!(flags & BLK_POLL_NOSLEEP) &&
q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
if (blk_mq_poll_hybrid(q, cookie))
return 1;
}
- return blk_mq_poll_classic(q, cookie, flags);
+ return blk_mq_poll_classic(q, cookie, iob, flags);
}
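/*
 * For reference, a minimal sketch of the struct io_comp_batch now threaded
 * through blk_mq_poll() and ->poll(): it carries a list of completed requests
 * plus a completion callback so drivers can batch completions. This is an
 * assumption about its shape; see include/linux/blkdev.h for the real
 * definition.
 */
struct io_comp_batch {
	struct request *req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};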
unsigned int blk_mq_rq_cpu(struct request *rq)