
[qemu-xen master] virtio-pci: fix virtio_pci_queue_enabled()



commit 0c9753ebda274b0e618d7b4032bb2d83d27483ed
Author:     Laurent Vivier <lvivier@xxxxxxxxxx>
AuthorDate: Mon Jul 27 17:33:19 2020 +0200
Commit:     Michael S. Tsirkin <mst@xxxxxxxxxx>
CommitDate: Mon Jul 27 11:34:50 2020 -0400

    virtio-pci: fix virtio_pci_queue_enabled()
    
    In legacy mode, virtio_pci_queue_enabled() falls back to
    virtio_queue_enabled() to determine whether the queue is enabled.

    But virtio_queue_enabled() in turn calls virtio_pci_queue_enabled()
    if k->queue_enabled is set: the two functions recurse into each
    other until the stack overflows and QEMU crashes.
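
    The cycle can be reproduced in miniature with a self-contained toy
    program (not QEMU code; names and signatures are simplified): the
    transport hook falls back to the generic helper, and the generic
    helper dispatches straight back to the hook.

        #include <stdbool.h>

        /* Stand-in for the VirtioBusClass queue_enabled hook. */
        static bool (*queue_enabled_hook)(int n);

        static bool generic_queue_enabled(int n);

        /* Stand-in for virtio_pci_queue_enabled(): in legacy mode it
         * falls back to the generic helper. */
        static bool transport_queue_enabled(int n)
        {
            return generic_queue_enabled(n);
        }

        /* Stand-in for virtio_queue_enabled(): when the transport
         * provides a hook, it dispatches back to it, giving unbounded
         * mutual recursion. */
        static bool generic_queue_enabled(int n)
        {
            if (queue_enabled_hook) {
                return queue_enabled_hook(n);
            }
            return true; /* stand-in for the desc-addr check */
        }

        int main(void)
        {
            queue_enabled_hook = transport_queue_enabled;
            return generic_queue_enabled(0); /* overflows the stack */
        }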
    
    The problem can be reproduced with
    "-device virtio-net-pci,disable-legacy=off,disable-modern=true
     -net tap,vhost=on"
    (disable-modern=true forces the legacy path, and vhost_net_start()
    queries the queue state when the vhost backend starts).
    
    A look at the backtrace makes the cycle explicit:
    
        ...
        #4  0x000000010029a438 in virtio_queue_enabled ()
        #5  0x0000000100497a9c in virtio_pci_queue_enabled ()
        ...
        #130902 0x000000010029a460 in virtio_queue_enabled ()
        #130903 0x0000000100497a9c in virtio_pci_queue_enabled ()
        #130904 0x000000010029a460 in virtio_queue_enabled ()
        #130905 0x0000000100454a20 in vhost_net_start ()
        ...
    
    This patch fixes the problem by introducing a new function,
    virtio_queue_enabled_legacy(), for the legacy case and calling it
    from virtio_pci_queue_enabled(). virtio_queue_enabled() also calls
    it, to avoid duplicating the check.
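
    Applied to the toy above, the fix has the same shape as the diff
    below: the legacy check gets its own helper, and the transport
    calls that helper directly, so it never re-enters the generic
    dispatcher.

        /* The legacy check moves into its own helper... */
        static bool generic_queue_enabled_legacy(int n)
        {
            return true; /* stand-in for the desc-addr check */
        }

        /* ...which the transport now calls directly, breaking the
         * cycle. */
        static bool transport_queue_enabled(int n)
        {
            return generic_queue_enabled_legacy(n);
        }

        /* The generic dispatcher keeps the hook; only its fallback
         * changes to the shared legacy helper. */
        static bool generic_queue_enabled(int n)
        {
            if (queue_enabled_hook) {
                return queue_enabled_hook(n);
            }
            return generic_queue_enabled_legacy(n);
        }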
    
    Fixes: f19bcdfedd53 ("virtio-pci: implement queue_enabled method")
    Cc: Jason Wang <jasowang@xxxxxxxxxx>
    Cc: Cindy Lu <lulu@xxxxxxxxxx>
    CC: Michael S. Tsirkin <mst@xxxxxxxxxx>
    Signed-off-by: Laurent Vivier <lvivier@xxxxxxxxxx>
    Message-Id: <20200727153319.43716-1-lvivier@xxxxxxxxxx>
    Reviewed-by: Michael S. Tsirkin <mst@xxxxxxxxxx>
    Signed-off-by: Michael S. Tsirkin <mst@xxxxxxxxxx>
---
 hw/virtio/virtio-pci.c     | 2 +-
 hw/virtio/virtio.c         | 7 ++++++-
 include/hw/virtio/virtio.h | 1 +
 3 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index ada1101d07..4ad3ad81a2 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -1116,7 +1116,7 @@ static bool virtio_pci_queue_enabled(DeviceState *d, int n)
         return proxy->vqs[vdev->queue_sel].enabled;
     }
 
-    return virtio_queue_enabled(vdev, n);
+    return virtio_queue_enabled_legacy(vdev, n);
 }
 
 static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 546a198e79..e983025217 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -3309,6 +3309,11 @@ hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
     return vdev->vq[n].vring.desc;
 }
 
+bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
+{
+    return virtio_queue_get_desc_addr(vdev, n) != 0;
+}
+
 bool virtio_queue_enabled(VirtIODevice *vdev, int n)
 {
     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
@@ -3317,7 +3322,7 @@ bool virtio_queue_enabled(VirtIODevice *vdev, int n)
     if (k->queue_enabled) {
         return k->queue_enabled(qbus->parent, n);
     }
-    return virtio_queue_get_desc_addr(vdev, n) != 0;
+    return virtio_queue_enabled_legacy(vdev, n);
 }
 
 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 198ffc7626..e424df12cf 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -295,6 +295,7 @@ typedef struct VirtIORNGConf VirtIORNGConf;
                       VIRTIO_F_RING_PACKED, false)
 
 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
+bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n);
 bool virtio_queue_enabled(VirtIODevice *vdev, int n);
 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
--
generated by git-patchbot for /home/xen/git/qemu-xen.git#master