git.itanic.dy.fi Git - linux-stable/commitdiff
xen/blkfront: don't take local copy of a request from the ring page
author: Juergen Gross <jgross@suse.com>
Mon, 29 Nov 2021 12:11:11 +0000 (13:11 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 8 Dec 2021 07:45:04 +0000 (08:45 +0100)
commit 8f5a695d99000fc3aa73934d7ced33cfc64dcdab upstream.

In order to avoid a malicious backend being able to influence the local
copy of a request, build the request locally first and then copy it to
the ring page, instead of doing it the other way round as today.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Link: https://lore.kernel.org/r/20210730103854.12681-3-jgross@suse.com
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/block/xen-blkfront.c

index 7dbfc2da92ec1f16ff782c2c59d3d9c7506d7ac8..d477d04547c8977870a7aafbfd35b70591ac5e25 100644 (file)
@@ -532,7 +532,7 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
        rinfo->shadow[id].status = REQ_WAITING;
        rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
 
-       (*ring_req)->u.rw.id = id;
+       rinfo->shadow[id].req.u.rw.id = id;
 
        return id;
 }
@@ -540,11 +540,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
 {
        struct blkfront_info *info = rinfo->dev_info;
-       struct blkif_request *ring_req;
+       struct blkif_request *ring_req, *final_ring_req;
        unsigned long id;
 
        /* Fill out a communications ring structure. */
-       id = blkif_ring_get_request(rinfo, req, &ring_req);
+       id = blkif_ring_get_request(rinfo, req, &final_ring_req);
+       ring_req = &rinfo->shadow[id].req;
 
        ring_req->operation = BLKIF_OP_DISCARD;
        ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
@@ -555,8 +556,8 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
        else
                ring_req->u.discard.flag = 0;
 
-       /* Keep a private copy so we can reissue requests when recovering. */
-       rinfo->shadow[id].req = *ring_req;
+       /* Copy the request to the ring page. */
+       *final_ring_req = *ring_req;
 
        return 0;
 }
@@ -689,6 +690,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 {
        struct blkfront_info *info = rinfo->dev_info;
        struct blkif_request *ring_req, *extra_ring_req = NULL;
+       struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
        unsigned long id, extra_id = NO_ASSOCIATED_ID;
        bool require_extra_req = false;
        int i;
@@ -730,7 +732,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
                }
 
        /* Fill out a communications ring structure. */
-       id = blkif_ring_get_request(rinfo, req, &ring_req);
+       id = blkif_ring_get_request(rinfo, req, &final_ring_req);
+       ring_req = &rinfo->shadow[id].req;
 
        num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
        num_grant = 0;
@@ -781,7 +784,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
                ring_req->u.rw.nr_segments = num_grant;
                if (unlikely(require_extra_req)) {
                        extra_id = blkif_ring_get_request(rinfo, req,
-                                                         &extra_ring_req);
+                                                         &final_extra_ring_req);
+                       extra_ring_req = &rinfo->shadow[extra_id].req;
+
                        /*
                         * Only the first request contains the scatter-gather
                         * list.
@@ -823,10 +828,10 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
        if (setup.segments)
                kunmap_atomic(setup.segments);
 
-       /* Keep a private copy so we can reissue requests when recovering. */
-       rinfo->shadow[id].req = *ring_req;
+       /* Copy request(s) to the ring page. */
+       *final_ring_req = *ring_req;
        if (unlikely(require_extra_req))
-               rinfo->shadow[extra_id].req = *extra_ring_req;
+               *final_extra_ring_req = *extra_ring_req;
 
        if (max_grefs > 0)
                gnttab_free_grant_references(setup.gref_head);