[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 5/6] xen-blkfront: make local copy of response before using it



Data on the shared page can be changed at any time by the backend. Make
a local copy, which the backend can no longer modify, and only then
access it.

This is complementary to XSA155.

CC: stable@xxxxxxxxxxxxxxx
Signed-off-by: Marek Marczykowski-Górecki <marmarek@xxxxxxxxxxxxxxxxxxxxxx>
---
 drivers/block/xen-blkfront.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2a8e781..3926811 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1549,7 +1549,7 @@ static bool blkif_completion(unsigned long *id,
 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 {
        struct request *req;
-       struct blkif_response *bret;
+       struct blkif_response bret;
        RING_IDX i, rp;
        unsigned long flags;
        struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
@@ -1566,8 +1566,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
        for (i = rinfo->ring.rsp_cons; i != rp; i++) {
                unsigned long id;
 
-               bret = RING_GET_RESPONSE(&rinfo->ring, i);
-               id   = bret->id;
+               RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
+               id   = bret.id;
                /*
                 * The backend has messed up and given us an id that we would
                 * never have given to it (we stamp it up to BLK_RING_SIZE -
@@ -1575,39 +1575,39 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                 */
                if (id >= BLK_RING_SIZE(info)) {
                        WARN(1, "%s: response to %s has incorrect id (%ld)\n",
-                            info->gd->disk_name, op_name(bret->operation), id);
+                            info->gd->disk_name, op_name(bret.operation), id);
                        /* We can't safely get the 'struct request' as
                         * the id is busted. */
                        continue;
                }
                req  = rinfo->shadow[id].request;
 
-               if (bret->operation != BLKIF_OP_DISCARD) {
+               if (bret.operation != BLKIF_OP_DISCARD) {
                        /*
                         * We may need to wait for an extra response if the
                         * I/O request is split in 2
                         */
-                       if (!blkif_completion(&id, rinfo, bret))
+                       if (!blkif_completion(&id, rinfo, &bret))
                                continue;
                }
 
                if (add_id_to_freelist(rinfo, id)) {
                        WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
-                            info->gd->disk_name, op_name(bret->operation), id);
+                            info->gd->disk_name, op_name(bret.operation), id);
                        continue;
                }
 
-               if (bret->status == BLKIF_RSP_OKAY)
+               if (bret.status == BLKIF_RSP_OKAY)
                        blkif_req(req)->error = BLK_STS_OK;
                else
                        blkif_req(req)->error = BLK_STS_IOERR;
 
-               switch (bret->operation) {
+               switch (bret.operation) {
                case BLKIF_OP_DISCARD:
-                       if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+                       if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
                                printk(KERN_WARNING "blkfront: %s: %s op 
failed\n",
-                                          info->gd->disk_name, 
op_name(bret->operation));
+                                          info->gd->disk_name, 
op_name(bret.operation));
                                blkif_req(req)->error = BLK_STS_NOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
@@ -1617,15 +1617,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                        break;
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
-                       if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+                       if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
                                printk(KERN_WARNING "blkfront: %s: %s op failed\n",
-                                      info->gd->disk_name, op_name(bret->operation));
+                                      info->gd->disk_name, op_name(bret.operation));
                                blkif_req(req)->error = BLK_STS_NOTSUPP;
                        }
-                       if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+                       if (unlikely(bret.status == BLKIF_RSP_ERROR &&
                                     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
                                printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
-                                      info->gd->disk_name, op_name(bret->operation));
+                                      info->gd->disk_name, op_name(bret.operation));
                                blkif_req(req)->error = BLK_STS_NOTSUPP;
                        }
                        if (unlikely(blkif_req(req)->error)) {
@@ -1638,9 +1638,9 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                        /* fall through */
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
-                       if (unlikely(bret->status != BLKIF_RSP_OKAY))
+                       if (unlikely(bret.status != BLKIF_RSP_OKAY))
                                dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
-                                       "request: %x\n", bret->status);
+                                       "request: %x\n", bret.status);
 
                        break;
                default:
-- 
git-series 0.9.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.