nvme: fix handling single range discard request
author Ming Lei <ming.lei@redhat.com>
Fri, 3 Mar 2023 23:13:45 +0000 (07:13 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 22 Mar 2023 12:31:27 +0000 (13:31 +0100)
[ Upstream commit 37f0dc2ec78af0c3f35dd05578763de059f6fe77 ]

While investigating a customer report of a warning in nvme_setup_discard,
we observed that the controller (nvme/tcp) actually exposes
queue_max_discard_segments(req->q) == 1.

The current code can't handle this situation: contiguous discard bios are
merged into a single request the same way normal RW requests are, yet a
discard range is still built for every bio in the request.
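
For illustration only (not from the patch), a minimal userspace sketch of
the mismatch; struct bio_stub and the bio layout below are made up:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical merged discard request: three contiguous bios,
     * 8 sectors (512 bytes each) per bio, starting at sector 2048. */
    struct bio_stub { uint64_t sector; uint32_t sectors; };

    int main(void)
    {
            struct bio_stub bios[] = { {2048, 8}, {2056, 8}, {2064, 8} };
            int segments = 1;   /* what queue_max_discard_segments() reports */
            int n = 0;

            /* Old logic: one discard range per bio, even though the
             * controller only accepts a single range. */
            for (size_t i = 0; i < sizeof(bios) / sizeof(bios[0]); i++) {
                    printf("range[%d]: sector=%llu nr=%u\n", n,
                           (unsigned long long)bios[i].sector, bios[i].sectors);
                    n++;
            }

            if (n != segments)
                    printf("WARN_ON_ONCE: n=%d != segments=%d\n", n, segments);
            return 0;
    }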

Fix the issue by building the range directly from the request's
sector/nr_sectors.
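
For reference, a standalone sketch of the sector-to-LBA conversion the fix
relies on, assuming 512-byte block-layer sectors and a 4096-byte LBA format
(ns->lba_shift == 12); the values are illustrative, matching the example
above:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int lba_shift = 12;  /* log2 of a 4096-byte LBA format */
            uint64_t req_pos = 2048;      /* blk_rq_pos(): start in 512B sectors */
            uint32_t req_sectors = 24;    /* blk_rq_sectors(): length in 512B sectors */

            /* Same arithmetic as nvme_sect_to_lba() and the nlb computation:
             * shift by (lba_shift - 9) to convert 512-byte sectors to LBAs. */
            uint64_t slba = req_pos >> (lba_shift - 9);      /* 2048 >> 3 = 256 */
            uint32_t nlb  = req_sectors >> (lba_shift - 9);  /* 24 >> 3 = 3 */

            printf("slba=%llu nlb=%u\n", (unsigned long long)slba, nlb);
            return 0;
    }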

Fixes: b35ba01ea697 ("nvme: support ranged discard requests")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/nvme/host/core.c

index 06750f3d527451a6b90231e6350cc90a7c112cb0..ef9d7a795b0077409a21973963b5216206e33908 100644 (file)
@@ -853,16 +853,26 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                range = page_address(ns->ctrl->discard_page);
        }
 
-       __rq_for_each_bio(bio, req) {
-               u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
-               u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
-
-               if (n < segments) {
-                       range[n].cattr = cpu_to_le32(0);
-                       range[n].nlb = cpu_to_le32(nlb);
-                       range[n].slba = cpu_to_le64(slba);
+       if (queue_max_discard_segments(req->q) == 1) {
+               u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
+               u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+
+               range[0].cattr = cpu_to_le32(0);
+               range[0].nlb = cpu_to_le32(nlb);
+               range[0].slba = cpu_to_le64(slba);
+               n = 1;
+       } else {
+               __rq_for_each_bio(bio, req) {
+                       u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+                       u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+                       if (n < segments) {
+                               range[n].cattr = cpu_to_le32(0);
+                               range[n].nlb = cpu_to_le32(nlb);
+                               range[n].slba = cpu_to_le64(slba);
+                       }
+                       n++;
                }
-               n++;
        }
 
        if (WARN_ON_ONCE(n != segments)) {