nvme: fix handling single range discard request
author Ming Lei <ming.lei@redhat.com>
Fri, 3 Mar 2023 23:13:45 +0000 (07:13 +0800)
committer Christoph Hellwig <hch@lst.de>
Wed, 15 Mar 2023 13:58:51 +0000 (14:58 +0100)
While investigating a customer report of a warning in
nvme_setup_discard, we observed that the controller (nvme/tcp) actually
exposes queue_max_discard_segments(req->q) == 1.

The current code can't handle this situation: with a single discard
segment, contiguous bios are merged into the request the same way as
for normal read/write requests, so the request may carry several bios
while the queue grants only one segment, and the per-bio loop then
trips the WARN_ON_ONCE(n != segments) check.

Fix the issue by building the single range directly from the request's
sector and nr_sectors instead of iterating over the bios.
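
As a rough illustration of the new single-segment path, here is a
minimal userspace sketch; struct fake_req, struct fake_range, and the
lba_shift value are hypothetical stand-ins for the kernel's request and
namespace fields, and the sector-to-LBA conversion mirrors what
nvme_sect_to_lba() does:

#include <stdint.h>
#include <stdio.h>

struct fake_req {
	uint64_t pos_sectors; /* blk_rq_pos(): start in 512-byte sectors */
	uint32_t nr_sectors;  /* blk_rq_sectors(): length in 512-byte sectors */
};

struct fake_range {
	uint64_t slba; /* starting logical block address */
	uint32_t nlb;  /* number of logical blocks */
};

/* Build the one discard range straight from the request (lba_shift >= 9). */
static struct fake_range build_single_range(const struct fake_req *req,
					    unsigned int lba_shift)
{
	struct fake_range r = {
		.slba = req->pos_sectors >> (lba_shift - 9),
		.nlb  = req->nr_sectors >> (lba_shift - 9),
	};
	return r;
}

int main(void)
{
	/* 4096-byte LBAs => lba_shift == 12 */
	struct fake_req req = { .pos_sectors = 2048, .nr_sectors = 256 };
	struct fake_range r = build_single_range(&req, 12);

	printf("slba=%llu nlb=%u\n", (unsigned long long)r.slba, r.nlb);
	return 0;
}

With 4096-byte LBAs, a request starting at 512-byte sector 2048 and
covering 256 sectors maps to slba == 256 and nlb == 32, matching the
shift by (lba_shift - 9) in the patch below.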

Fixes: b35ba01ea697 ("nvme: support ranged discard requests")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/host/core.c

index c2730b116dc680eac04df43a81c81a3826c5da16..d4be525f8100acd6df91b9aa88e496ae656f2344 100644
@@ -781,16 +781,26 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                range = page_address(ns->ctrl->discard_page);
        }
 
-       __rq_for_each_bio(bio, req) {
-               u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
-               u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
-
-               if (n < segments) {
-                       range[n].cattr = cpu_to_le32(0);
-                       range[n].nlb = cpu_to_le32(nlb);
-                       range[n].slba = cpu_to_le64(slba);
+       if (queue_max_discard_segments(req->q) == 1) {
+               u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
+               u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+
+               range[0].cattr = cpu_to_le32(0);
+               range[0].nlb = cpu_to_le32(nlb);
+               range[0].slba = cpu_to_le64(slba);
+               n = 1;
+       } else {
+               __rq_for_each_bio(bio, req) {
+                       u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+                       u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+                       if (n < segments) {
+                               range[n].cattr = cpu_to_le32(0);
+                               range[n].nlb = cpu_to_le32(nlb);
+                               range[n].slba = cpu_to_le64(slba);
+                       }
+                       n++;
                }
-               n++;
        }
 
        if (WARN_ON_ONCE(n != segments)) {