nvme: support ranged discard requests
author: Christoph Hellwig <hch@lst.de>
Wed, 8 Feb 2017 13:46:50 +0000 (14:46 +0100)
committer: Jens Axboe <axboe@fb.com>
Wed, 8 Feb 2017 20:43:10 +0000 (13:43 -0700)
NVMe supports up to 256 ranges per DSM command, so wire up support
for ranged discards up to that limit.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
drivers/nvme/host/core.c
include/linux/nvme.h

index 1640a5c8abbb599e6cc87a61a75f79f1ebeec730..2701c21d1719febf225c9d33ab10a919640beeb8 100644 (file)
@@ -238,26 +238,38 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
 static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmnd)
 {
+       unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
        struct nvme_dsm_range *range;
-       unsigned int nr_bytes = blk_rq_bytes(req);
+       struct bio *bio;
 
-       range = kmalloc(sizeof(*range), GFP_ATOMIC);
+       range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
                return BLK_MQ_RQ_QUEUE_BUSY;
 
-       range->cattr = cpu_to_le32(0);
-       range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
-       range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+       __rq_for_each_bio(bio, req) {
+               u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
+               u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+               range[n].cattr = cpu_to_le32(0);
+               range[n].nlb = cpu_to_le32(nlb);
+               range[n].slba = cpu_to_le64(slba);
+               n++;
+       }
+
+       if (WARN_ON_ONCE(n != segments)) {
+               kfree(range);
+               return BLK_MQ_RQ_QUEUE_ERROR;
+       }
 
        memset(cmnd, 0, sizeof(*cmnd));
        cmnd->dsm.opcode = nvme_cmd_dsm;
        cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
-       cmnd->dsm.nr = 0;
+       cmnd->dsm.nr = segments - 1;
        cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
-       req->special_vec.bv_len = sizeof(*range);
+       req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;
 
        return BLK_MQ_RQ_QUEUE_OK;
@@ -871,6 +883,9 @@ static void nvme_config_discard(struct nvme_ns *ns)
        struct nvme_ctrl *ctrl = ns->ctrl;
        u32 logical_block_size = queue_logical_block_size(ns->queue);
 
+       BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
+                       NVME_DSM_MAX_RANGES);
+
        if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
                ns->queue->limits.discard_zeroes_data = 1;
        else
@@ -879,6 +894,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
        ns->queue->limits.discard_alignment = logical_block_size;
        ns->queue->limits.discard_granularity = logical_block_size;
        blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
+       blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
 }
 
index 3d1c6f1b15c9bd351fa04d4e9824ff83b5c0045a..3e2ed49c3ad8b54894ffc5e8cfa028d27930581d 100644 (file)
@@ -553,6 +553,8 @@ enum {
        NVME_DSMGMT_AD          = 1 << 2,
 };
 
+#define NVME_DSM_MAX_RANGES    256
+
 struct nvme_dsm_range {
        __le32                  cattr;
        __le32                  nlb;