foo nvme-passthru-wip
author Jens Axboe <axboe@kernel.dk>
Mon, 27 Sep 2021 22:26:17 +0000 (16:26 -0600)
committer Jens Axboe <axboe@kernel.dk>
Mon, 27 Sep 2021 22:26:17 +0000 (16:26 -0600)
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-exec.c
block/blk-mq.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/pci.c

diff --git a/block/blk-exec.c b/block/blk-exec.c
index 5fa3d735b9c8ba811c1484b172edddf47412c5ab..44c4a2ffa7087e4e785493790ec2339110c863b3 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(__blk_execute_rq_nowait);
 void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
                           int at_head, rq_end_io_fn *done)
 {
-       __blk_execute_rq_nowait(bd_disk, rq, false, true, done);
+       __blk_execute_rq_nowait(bd_disk, rq, at_head, true, done);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
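The hunk above forwards the caller's at_head flag instead of hard-coding it to false, so blk_execute_rq_nowait() no longer silently queues every request at the tail. A minimal caller sketch of the fixed wrapper, not part of this patch: urgent_end_io() and issue_at_head() are invented names for illustration, and it assumes the driver already holds an allocated, prepared struct request.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical completion handler: just free the request. */
static void urgent_end_io(struct request *rq, blk_status_t error)
{
        blk_mq_free_request(rq);
}

/*
 * With the fix, at_head == 1 is actually honoured, so the request is
 * inserted ahead of anything already queued rather than at the tail.
 */
static void issue_at_head(struct gendisk *disk, struct request *rq)
{
        blk_execute_rq_nowait(disk, rq, 1, urgent_end_io);
}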
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ce94adc5118f43582b75899c6278b32a6eef4ab0..9ef6f7bb3ca39fecbdf074733cd6d2ec14d6acae 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1840,6 +1840,20 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
        blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
+static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
+{
+       list_add_tail(&rq->queuelist, &plug->mq_list);
+       plug->rq_count++;
+       if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
+               struct request *tmp;
+
+               tmp = list_first_entry(&plug->mq_list, struct request,
+                                               queuelist);
+               if (tmp->q != rq->q)
+                       plug->multiple_queues = true;
+       }
+}
+
 /**
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
@@ -1853,16 +1867,21 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                                  bool run_queue)
 {
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+       struct blk_plug *plug = current->plug;
 
-       spin_lock(&hctx->lock);
-       if (at_head)
-               list_add(&rq->queuelist, &hctx->dispatch);
-       else
-               list_add_tail(&rq->queuelist, &hctx->dispatch);
-       spin_unlock(&hctx->lock);
+       if (plug) {
+               blk_add_rq_to_plug(plug, rq);
+       } else {
+               spin_lock(&hctx->lock);
+               if (at_head)
+                       list_add(&rq->queuelist, &hctx->dispatch);
+               else
+                       list_add_tail(&rq->queuelist, &hctx->dispatch);
+               spin_unlock(&hctx->lock);
 
-       if (run_queue)
-               blk_mq_run_hw_queue(hctx, false);
+               if (run_queue)
+                       blk_mq_run_hw_queue(hctx, false);
+       }
 }
 
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
@@ -2121,20 +2140,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
-static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
-{
-       list_add_tail(&rq->queuelist, &plug->mq_list);
-       plug->rq_count++;
-       if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
-               struct request *tmp;
-
-               tmp = list_first_entry(&plug->mq_list, struct request,
-                                               queuelist);
-               if (tmp->q != rq->q)
-                       plug->multiple_queues = true;
-       }
-}
-
 /*
  * Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  * queues. This is important for md arrays to benefit from merging
@@ -2278,6 +2283,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
                }
        } else if ((q->nr_hw_queues > 1 && is_sync) ||
                        !data.hctx->dispatch_busy) {
+
                /*
                 * There is no scheduler and we can try to send directly
                 * to the hardware.
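The blk-mq change above moves blk_add_rq_to_plug() earlier in the file and teaches blk_mq_request_bypass_insert() to add a request to the caller's plug when one is active, instead of taking hctx->lock and running the hardware queue once per request. A hedged sketch of how a submitter could benefit, not part of this patch: batch_end_io() and issue_plugged_batch() are hypothetical names, and it assumes blk_execute_rq_nowait() reaches the bypass-insert path for these passthrough requests so they are flushed as one batch when the plug is finished.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical completion handler: just free the request. */
static void batch_end_io(struct request *rq, blk_status_t error)
{
        blk_mq_free_request(rq);
}

static void issue_plugged_batch(struct gendisk *disk, struct request **rqs,
                                int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* installs current->plug */
        for (i = 0; i < nr; i++)
                blk_execute_rq_nowait(disk, rqs[i], 0, batch_end_io);
        blk_finish_plug(&plug);         /* flushes the plugged requests */
}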
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 0d93a79900e4cae778d674aa7daabe8adc3e9be1..7040090a745a44f31294527798fcf53af3e5f5f8 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -171,13 +171,14 @@ int nvme_rq_map_user_fixedb(struct request_queue *q, struct request *rq,
                return ret;
        iter_count = iov_iter_count(&iter);
        nr_segs = iter.nr_segs;
+       printk("segs here %d\n", (int)nr_segs);
 
        if (!iter_count || (iter_count >> 9) > queue_max_hw_sectors(q))
                return -EINVAL;
        if (nr_segs > queue_max_segments(q))
                return -EINVAL;
        /* no iovecs to alloc, as we already have a BVEC iterator */
-       bio = bio_kmalloc(gfp_mask, 0);
+       bio = bio_alloc(gfp_mask, 0);
        if (!bio)
                return -ENOMEM;
 
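The ioctl.c hunk validates the pre-registered (fixed) buffer before mapping it: the byte count is converted to 512-byte sectors (>> 9) and checked against queue_max_hw_sectors(), the segment count against queue_max_segments(), and the bio is then allocated with zero inline iovecs because the BVEC iterator already describes the pages; the WIP also switches bio_kmalloc() to bio_alloc() and leaves a debug printk in place. A standalone sketch of the same limit checks, for illustration only: check_fixedb_limits() is a hypothetical helper, not something the patch adds.

#include <linux/types.h>
#include <linux/errno.h>

static int check_fixedb_limits(size_t iter_count, unsigned int nr_segs,
                               unsigned int max_hw_sectors,
                               unsigned int max_segments)
{
        /* Reject empty buffers and buffers larger than one request may carry. */
        if (!iter_count || (iter_count >> 9) > max_hw_sectors)
                return -EINVAL;
        /* Reject iterators with more segments than the queue supports. */
        if (nr_segs > max_segments)
                return -EINVAL;
        return 0;
}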
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b82492cd750330803f48e58f6c3e1a3d7d7daaff..69af7555ff30ec1127f6872a8cc3a4a0442bd4ec 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -857,6 +857,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                }
        }
 
+       if (!strncmp(current->comm, "fio", 3))
+               printk("segs %d\n", blk_rq_nr_phys_segments(req));
        iod->dma_len = 0;
        iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
        if (!iod->sg)