void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
int at_head, rq_end_io_fn *done)
{
- __blk_execute_rq_nowait(bd_disk, rq, false, true, done);
+ __blk_execute_rq_nowait(bd_disk, rq, at_head, true, done);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
blk_mq_hctx_mark_pending(hctx, ctx);
}
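+/*
+ * Add @rq to the current task's plug list and note when the plug starts
+ * holding requests for more than one request_queue.
+ */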
+static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
+{
+ list_add_tail(&rq->queuelist, &plug->mq_list);
+ plug->rq_count++;
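+ /*
+ * Once a second request is plugged, compare it against the first one
+ * to detect whether the plug now spans more than one request_queue.
+ */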
+ if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
+ struct request *tmp;
+
+ tmp = list_first_entry(&plug->mq_list, struct request,
+ queuelist);
+ if (tmp->q != rq->q)
+ plug->multiple_queues = true;
+ }
+}
+
/**
* blk_mq_request_bypass_insert - Insert a request at dispatch list.
* @rq: Pointer to request to be inserted.
bool run_queue)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+ struct blk_plug *plug = current->plug;
- spin_lock(&hctx->lock);
- if (at_head)
- list_add(&rq->queuelist, &hctx->dispatch);
- else
- list_add_tail(&rq->queuelist, &hctx->dispatch);
- spin_unlock(&hctx->lock);
+ if (plug) {
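+ /*
+ * current->plug is non-NULL while the task is between blk_start_plug()
+ * and blk_finish_plug(). In that case queue the request on the plug
+ * list instead of the hctx dispatch list; it is issued when the plug
+ * is flushed, so the hardware queue is not run here.
+ */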
+ blk_add_rq_to_plug(plug, rq);
+ } else {
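+ /* No plug active: insert directly onto the hctx dispatch list. */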
+ spin_lock(&hctx->lock);
+ if (at_head)
+ list_add(&rq->queuelist, &hctx->dispatch);
+ else
+ list_add_tail(&rq->queuelist, &hctx->dispatch);
+ spin_unlock(&hctx->lock);
- if (run_queue)
- blk_mq_run_hw_queue(hctx, false);
+ if (run_queue)
+ blk_mq_run_hw_queue(hctx, false);
+ }
}
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
hctx->queue->mq_ops->commit_rqs(hctx);
}
-static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
-{
- list_add_tail(&rq->queuelist, &plug->mq_list);
- plug->rq_count++;
- if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
- struct request *tmp;
-
- tmp = list_first_entry(&plug->mq_list, struct request,
- queuelist);
- if (tmp->q != rq->q)
- plug->multiple_queues = true;
- }
-}
-
/*
* Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
* queues. This is important for md arrays to benefit from merging
}
} else if ((q->nr_hw_queues > 1 && is_sync) ||
!data.hctx->dispatch_busy) {
/*
* There is no scheduler and we can try to send directly
* to the hardware.
return ret;
iter_count = iov_iter_count(&iter);
nr_segs = iter.nr_segs;
+ printk("segs here %d\n", (int)nr_segs);
if (!iter_count || (iter_count >> 9) > queue_max_hw_sectors(q))
return -EINVAL;
if (nr_segs > queue_max_segments(q))
return -EINVAL;
/* no iovecs to alloc, as we already have a BVEC iterator */
- bio = bio_kmalloc(gfp_mask, 0);
+ bio = bio_alloc(gfp_mask, 0);
if (!bio)
return -ENOMEM;