block/nvme: pass at_head through blk_execute_rq_nowait, move blk_add_rq_to_plug
earlier and use it for bypass inserts when a plug is active

NOTE(review): work-in-progress — the nvme hunks below still contain debug
instrumentation that must be dropped before merging: a printk() of nr_segs in
nvme_rq_map_user_fixedb (ioctl.c) and a printk() of blk_rq_nr_phys_segments
gated on current->comm == "fio" in nvme_map_data (pci.c). The bio_kmalloc ->
bio_alloc change in ioctl.c also deserves its own justification in the log.
-rw-r--r-- block/blk-exec.c          |  2 +-
-rw-r--r-- block/blk-mq.c            | 50 ++++++++++++++++++---------------
-rw-r--r-- drivers/nvme/host/ioctl.c |  3 ++-
-rw-r--r-- drivers/nvme/host/pci.c   |  2 ++
4 files changed, 33 insertions(+), 24 deletions(-)
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 5fa3d735b9c8..44c4a2ffa708 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(__blk_execute_rq_nowait);
void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
int at_head, rq_end_io_fn *done)
{
- __blk_execute_rq_nowait(bd_disk, rq, false, true, done);
+ __blk_execute_rq_nowait(bd_disk, rq, at_head, true, done);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ce94adc5118f..9ef6f7bb3ca3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1840,6 +1840,20 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
blk_mq_hctx_mark_pending(hctx, ctx);
}
+static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
+{
+ list_add_tail(&rq->queuelist, &plug->mq_list);
+ plug->rq_count++;
+ if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
+ struct request *tmp;
+
+ tmp = list_first_entry(&plug->mq_list, struct request,
+ queuelist);
+ if (tmp->q != rq->q)
+ plug->multiple_queues = true;
+ }
+}
+
/**
* blk_mq_request_bypass_insert - Insert a request at dispatch list.
* @rq: Pointer to request to be inserted.
@@ -1853,16 +1867,21 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
bool run_queue)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+ struct blk_plug *plug = current->plug;
- spin_lock(&hctx->lock);
- if (at_head)
- list_add(&rq->queuelist, &hctx->dispatch);
- else
- list_add_tail(&rq->queuelist, &hctx->dispatch);
- spin_unlock(&hctx->lock);
+ if (plug) {
+ blk_add_rq_to_plug(plug, rq);
+ } else {
+ spin_lock(&hctx->lock);
+ if (at_head)
+ list_add(&rq->queuelist, &hctx->dispatch);
+ else
+ list_add_tail(&rq->queuelist, &hctx->dispatch);
+ spin_unlock(&hctx->lock);
- if (run_queue)
- blk_mq_run_hw_queue(hctx, false);
+ if (run_queue)
+ blk_mq_run_hw_queue(hctx, false);
+ }
}
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
@@ -2121,20 +2140,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
hctx->queue->mq_ops->commit_rqs(hctx);
}
-static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
-{
- list_add_tail(&rq->queuelist, &plug->mq_list);
- plug->rq_count++;
- if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
- struct request *tmp;
-
- tmp = list_first_entry(&plug->mq_list, struct request,
- queuelist);
- if (tmp->q != rq->q)
- plug->multiple_queues = true;
- }
-}
-
/*
* Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
* queues. This is important for md arrays to benefit from merging
@@ -2278,6 +2283,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
}
} else if ((q->nr_hw_queues > 1 && is_sync) ||
!data.hctx->dispatch_busy) {
+
/*
* There is no scheduler and we can try to send directly
* to the hardware.
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 0d93a79900e4..7040090a745a 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -171,13 +171,14 @@ int nvme_rq_map_user_fixedb(struct request_queue *q, struct request *rq,
return ret;
iter_count = iov_iter_count(&iter);
nr_segs = iter.nr_segs;
+ printk("segs here %d\n", (int)nr_segs);
if (!iter_count || (iter_count >> 9) > queue_max_hw_sectors(q))
return -EINVAL;
if (nr_segs > queue_max_segments(q))
return -EINVAL;
/* no iovecs to alloc, as we already have a BVEC iterator */
- bio = bio_kmalloc(gfp_mask, 0);
+ bio = bio_alloc(gfp_mask, 0);
if (!bio)
return -ENOMEM;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b82492cd7503..69af7555ff30 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -857,6 +857,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
}
}
+ if (!strncmp(current->comm, "fio", 3))
+ printk("segs %d\n", blk_rq_nr_phys_segments(req));
iod->dma_len = 0;
iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
if (!iod->sg)