blk-mq: make sure active queue usage is held for bio_integrity_prep()
author     Christoph Hellwig <hch@infradead.org>
           Mon, 13 Nov 2023 03:52:31 +0000 (11:52 +0800)
committer  Jens Axboe <axboe@kernel.dk>
           Mon, 13 Nov 2023 15:52:52 +0000 (08:52 -0700)
blk_integrity_unregister() can run if the queue usage counter isn't held
while a bio has its integrity data prepared, so the request may be
completed by calling profile->complete_fn after the profile has been
unregistered, leading to a kernel panic.
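
This window existed because, before this patch, blk_mq_submit_bio()
prepared the integrity data before any queue usage reference was taken.
An illustrative condensation of the code the diff below removes (error
handling trimmed):

        if (!bio_integrity_prep(bio))  /* integrity prepared here ... */
                return;

        rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
        if (!rq) {
                if (!bio)
                        return;
                /*
                 * ... but the queue usage counter is only grabbed inside
                 * blk_mq_get_new_requests(), via bio_queue_enter(), so
                 * blk_integrity_unregister() can run in between.
                 */
                rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
                if (unlikely(!rq))
                        return;
        }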

Another constraint is that bio_integrity_prep() needs to be called before
the bio merge is attempted.

Fix the issue by:

- calling bio_integrity_prep() only while a queue usage counter reference
  is reliably held

- calling bio_integrity_prep() before attempting to merge the bio, as
  sketched below
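
The resulting blk_mq_submit_bio() ordering can be condensed as follows
(an illustrative sketch of the diff below; tracing, qos tracking and the
subsequent issue path are omitted, and the diff itself is authoritative):

        rq = NULL;
        if (plug) {
                rq = rq_list_peek(&plug->cached_rq);
                if (rq && rq->q != q)
                        rq = NULL;
        }

        if (rq) {
                /* cached request: a queue usage reference is already held */
                if (!bio_integrity_prep(bio))
                        return;
                if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
                        return;
                if (blk_mq_can_use_cached_rq(rq, plug, bio))
                        goto done;
                /* cached rq doesn't fit: keep a reference for the new rq */
                percpu_ref_get(&q->q_usage_counter);
        } else {
                /* grab the queue usage counter before integrity prep */
                if (unlikely(bio_queue_enter(bio)))
                        return;
                if (!bio_integrity_prep(bio)) {
                        blk_queue_exit(q);
                        return;
                }
        }

        rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
        if (unlikely(!rq)) {
                blk_queue_exit(q);
                return;
        }
done:
        /* ... proceed with the request as before ... */
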

Fixes: 900e080752025f00 ("block: move queue enter logic into blk_mq_submit_bio()")
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Link: https://lore.kernel.org/r/20231113035231.2708053-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e2d11183f62e374f0f83fa56a4e642e50092cd0e..900c1be1fee188b03857a7e12b6fe6978f04a021 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2858,11 +2858,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        };
        struct request *rq;
 
-       if (unlikely(bio_queue_enter(bio)))
-               return NULL;
-
        if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-               goto queue_exit;
+               return NULL;
 
        rq_qos_throttle(q, bio);
 
@@ -2878,35 +2875,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        rq_qos_cleanup(q, bio);
        if (bio->bi_opf & REQ_NOWAIT)
                bio_wouldblock_error(bio);
-queue_exit:
-       blk_queue_exit(q);
        return NULL;
 }
 
-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-               struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
+/* return true if this @rq can be used for @bio */
+static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
+               struct bio *bio)
 {
-       struct request *rq;
-       enum hctx_type type, hctx_type;
+       enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
+       enum hctx_type hctx_type = rq->mq_hctx->type;
 
-       if (!plug)
-               return NULL;
-       rq = rq_list_peek(&plug->cached_rq);
-       if (!rq || rq->q != q)
-               return NULL;
+       WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
 
-       if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
-               *bio = NULL;
-               return NULL;
-       }
-
-       type = blk_mq_get_hctx_type((*bio)->bi_opf);
-       hctx_type = rq->mq_hctx->type;
        if (type != hctx_type &&
            !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
-               return NULL;
-       if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
-               return NULL;
+               return false;
+       if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+               return false;
 
        /*
         * If any qos ->throttle() end up blocking, we will have flushed the
@@ -2914,12 +2899,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
         * before we throttle.
         */
        plug->cached_rq = rq_list_next(rq);
-       rq_qos_throttle(q, *bio);
+       rq_qos_throttle(rq->q, bio);
 
        blk_mq_rq_time_init(rq, 0);
-       rq->cmd_flags = (*bio)->bi_opf;
+       rq->cmd_flags = bio->bi_opf;
        INIT_LIST_HEAD(&rq->queuelist);
-       return rq;
+       return true;
 }
 
 static void bio_set_ioprio(struct bio *bio)
@@ -2949,7 +2934,7 @@ void blk_mq_submit_bio(struct bio *bio)
        struct blk_plug *plug = blk_mq_plug(bio);
        const int is_sync = op_is_sync(bio->bi_opf);
        struct blk_mq_hw_ctx *hctx;
-       struct request *rq;
+       struct request *rq = NULL;
        unsigned int nr_segs = 1;
        blk_status_t ret;
 
@@ -2960,20 +2945,36 @@ void blk_mq_submit_bio(struct bio *bio)
                        return;
        }
 
-       if (!bio_integrity_prep(bio))
-               return;
-
        bio_set_ioprio(bio);
 
-       rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
-       if (!rq) {
-               if (!bio)
+       if (plug) {
+               rq = rq_list_peek(&plug->cached_rq);
+               if (rq && rq->q != q)
+                       rq = NULL;
+       }
+       if (rq) {
+               if (!bio_integrity_prep(bio))
                        return;
-               rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
-               if (unlikely(!rq))
+               if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
                        return;
+               if (blk_mq_can_use_cached_rq(rq, plug, bio))
+                       goto done;
+               percpu_ref_get(&q->q_usage_counter);
+       } else {
+               if (unlikely(bio_queue_enter(bio)))
+                       return;
+               if (!bio_integrity_prep(bio))
+                       goto fail;
+       }
+
+       rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+       if (unlikely(!rq)) {
+fail:
+               blk_queue_exit(q);
+               return;
        }
 
+done:
        trace_block_getrq(bio);
 
        rq_qos_track(q, rq, bio);