
[Xen-changelog] [qemu-xen master] xen_disk: split discard input to match internal representation



commit 7875efb9f641ed0e79320bf258ee69cd0bf03716
Author:     Olaf Hering <olaf@xxxxxxxxx>
AuthorDate: Wed Nov 23 10:39:12 2016 +0000
Commit:     Stefano Stabellini <sstabellini@xxxxxxxxxx>
CommitDate: Wed Nov 23 10:47:48 2016 -0800

    xen_disk: split discard input to match internal representation
    
    The guest sends discard requests as u64 sector/count pairs, but the
    block layer operates internally with s64/s32 pairs. The conversion
    leads to I/O errors in the guest; the discard request is not processed.
    
      domU.cfg:
      'vdev=xvda, format=qcow2, backendtype=qdisk, target=/x.qcow2'
      domU:
      mkfs.ext4 -F /dev/xvda
      Discarding device blocks: failed - Input/output error
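
    A minimal standalone sketch (not part of the patch) of the conversion
    problem, assuming 512-byte sectors; SECTOR_BITS stands in for QEMU's
    BDRV_SECTOR_BITS and the plain int mirrors the s32 byte count the
    block layer uses internally:

      #include <stdint.h>
      #include <stdio.h>

      #define SECTOR_BITS 9                        /* 512-byte sectors */

      int main(void)
      {
          uint64_t nr_sectors = UINT64_C(6) << 20; /* 3 GiB worth of sectors */
          uint64_t bytes = nr_sectors << SECTOR_BITS;
          int as_s32 = (int)bytes;                 /* implementation-defined wrap */

          printf("bytes as u64: %llu\n", (unsigned long long)bytes);
          printf("bytes as s32: %d\n", as_s32);    /* typically -1073741824 */
          return 0;
      }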
    
    Fix this by splitting the request into chunks of BDRV_REQUEST_MAX_SECTORS.
    Add input range checking to avoid overflow.
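
    A second sketch (also not part of the patch) showing what the new
    range check rejects and how the split keeps every chunk within a
    signed 32-bit byte count; SECTOR_BITS and MAX_CHUNK_BYTES restate
    BDRV_SECTOR_BITS and BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS
    for illustration only:

      #include <limits.h>
      #include <stdbool.h>
      #include <stdint.h>
      #include <stdio.h>

      #define SECTOR_BITS     9
      #define MAX_CHUNK_BYTES ((uint64_t)(INT_MAX >> SECTOR_BITS) << SECTOR_BITS)

      static bool range_ok(uint64_t start, uint64_t count)
      {
          /* reject wrap-around and totals beyond INT64_MAX bytes */
          return start + count >= count &&
                 start + count <= (uint64_t)INT64_MAX >> SECTOR_BITS;
      }

      int main(void)
      {
          uint64_t count = UINT64_C(16) << 20;     /* 16M sectors = 8 GiB */
          uint64_t remaining = count << SECTOR_BITS;
          unsigned chunks = 0;

          if (!range_ok(0, count)) {
              return 1;
          }
          while (remaining > 0) {
              uint64_t chunk = remaining > MAX_CHUNK_BYTES ? MAX_CHUNK_BYTES
                                                           : remaining;
              remaining -= chunk;                  /* each chunk fits in an int */
              chunks++;
          }
          printf("8 GiB discard -> %u chunks\n", chunks);  /* prints 5 here */
          return 0;
      }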
    
    Fixes f313520 ("xen_disk: add discard support")
    
    Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
    Reviewed-by: Eric Blake <eblake@xxxxxxxxxx>
    Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
 hw/block/xen_disk.c | 42 ++++++++++++++++++++++++++++++++++++------
 1 file changed, 36 insertions(+), 6 deletions(-)

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 3a7dc19..456a2d5 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -660,6 +660,38 @@ static void qemu_aio_complete(void *opaque, int ret)
     qemu_bh_schedule(ioreq->blkdev->bh);
 }
 
+static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
+                              uint64_t nr_sectors)
+{
+    struct XenBlkDev *blkdev = ioreq->blkdev;
+    int64_t byte_offset;
+    int byte_chunk;
+    uint64_t byte_remaining, limit;
+    uint64_t sec_start = sector_number;
+    uint64_t sec_count = nr_sectors;
+
+    /* Wrap around, or overflowing byte limit? */
+    if (sec_start + sec_count < sec_count ||
+        sec_start + sec_count > INT64_MAX >> BDRV_SECTOR_BITS) {
+        return false;
+    }
+
+    limit = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
+    byte_offset = sec_start << BDRV_SECTOR_BITS;
+    byte_remaining = sec_count << BDRV_SECTOR_BITS;
+
+    do {
+        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
+        ioreq->aio_inflight++;
+        blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
+                         qemu_aio_complete, ioreq);
+        byte_remaining -= byte_chunk;
+        byte_offset += byte_chunk;
+    } while (byte_remaining > 0);
+
+    return true;
+}
+
 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
@@ -708,12 +740,10 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
         break;
     case BLKIF_OP_DISCARD:
     {
-        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
-        ioreq->aio_inflight++;
-        blk_aio_pdiscard(blkdev->blk,
-                         discard_req->sector_number << BDRV_SECTOR_BITS,
-                         discard_req->nr_sectors << BDRV_SECTOR_BITS,
-                         qemu_aio_complete, ioreq);
+        struct blkif_request_discard *req = (void *)&ioreq->req;
+        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
+            goto err;
+        }
         break;
     }
     default:
--
generated by git-patchbot for /home/xen/git/qemu-xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

