
[Minios-devel] [UNIKRAFT PATCH v3 01/14] plat/virtio: Release an individual virtqueue



This patch introduces support for releasing an individual virtqueue: a new
vq_release operation in struct virtio_config_ops, a virtio_vqueue_release()
helper that invokes it, and an implementation for the legacy virtio PCI
transport.

Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
---
 plat/drivers/include/virtio/virtio_bus.h | 21 +++++++++++++++++++
 plat/drivers/virtio/virtio_pci.c         | 26 ++++++++++++++++++++++++
 2 files changed, 47 insertions(+)
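
Usage sketch for reviewers (not part of the patch): a driver holding a
single virtqueue can hand it back on shutdown via the new helper. The
driver structure, field names and function name below are hypothetical.

#include <uk/alloc.h>
#include <virtio/virtio_bus.h>

/* Hypothetical per-device driver state. */
struct mydrv_device {
	struct virtio_dev *vdev;
	struct virtqueue *vq;
	struct uk_alloc *a;
};

static void mydrv_shutdown(struct mydrv_device *d)
{
	/* Forwards to the transport's vq_release op, which deactivates
	 * the queue on the device, unlinks it from vdev->vqs and frees
	 * its memory.
	 */
	virtio_vqueue_release(d->vdev, d->vq, d->a);
	d->vq = NULL;
}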

diff --git a/plat/drivers/include/virtio/virtio_bus.h b/plat/drivers/include/virtio/virtio_bus.h
index fcea5d63..56f0dafe 100644
--- a/plat/drivers/include/virtio/virtio_bus.h
+++ b/plat/drivers/include/virtio/virtio_bus.h
@@ -104,6 +104,8 @@ struct virtio_config_ops {
                                      __u16 queue_id,
                                      virtqueue_callback_t callback,
                                      struct uk_alloc *a);
+       void (*vq_release)(struct virtio_dev *vdev, struct virtqueue *vq,
+                               struct uk_alloc *a);
 };
 
 /**
@@ -316,6 +318,25 @@ static inline struct virtqueue *virtio_vqueue_setup(struct virtio_dev *vdev,
        return vq;
 }
 
+/**
+ * A helper function to release an individual virtqueue.
+ * @param vdev
+ *     Reference to the virtio device.
+ * @param vq
+ *     Reference to the virtqueue.
+ * @param a
+ *     A reference to the allocator.
+ */
+static inline void virtio_vqueue_release(struct virtio_dev *vdev,
+               struct virtqueue *vq, struct uk_alloc *a)
+{
+       UK_ASSERT(vdev);
+       UK_ASSERT(vq);
+       UK_ASSERT(a);
+       if (likely(vdev->cops->vq_release))
+               vdev->cops->vq_release(vdev, vq, a);
+}
+
 static inline int virtio_has_features(__u64 features, __u8 bpos)
 {
        __u64 tmp_feature = 0;
diff --git a/plat/drivers/virtio/virtio_pci.c b/plat/drivers/virtio/virtio_pci.c
index e398bfc6..c0c9032f 100644
--- a/plat/drivers/virtio/virtio_pci.c
+++ b/plat/drivers/virtio/virtio_pci.c
@@ -92,6 +92,8 @@ static struct virtqueue *vpci_legacy_vq_setup(struct virtio_dev *vdev,
                                              __u16 num_desc,
                                              virtqueue_callback_t callback,
                                              struct uk_alloc *a);
+static void vpci_legacy_vq_release(struct virtio_dev *vdev,
+               struct virtqueue *vq, struct uk_alloc *a);
 static int virtio_pci_handle(void *arg);
 static int vpci_legacy_notify(struct virtio_dev *vdev, __u16 queue_id);
 static int virtio_pci_legacy_add_dev(struct pci_device *pci_dev,
@@ -110,6 +112,7 @@ static struct virtio_config_ops vpci_legacy_ops = {
        .status_set   = vpci_legacy_pci_status_set,
        .vqs_find     = vpci_legacy_pci_vq_find,
        .vq_setup     = vpci_legacy_vq_setup,
+       .vq_release   = vpci_legacy_vq_release,
 };
 
 static int vpci_legacy_notify(struct virtio_dev *vdev, __u16 queue_id)
@@ -188,6 +191,29 @@ err_exit:
        return vq;
 }
 
+static void vpci_legacy_vq_release(struct virtio_dev *vdev,
+               struct virtqueue *vq, struct uk_alloc *a)
+{
+       struct virtio_pci_dev *vpdev = NULL;
+       unsigned long flags;
+
+       UK_ASSERT(vq != NULL);
+       UK_ASSERT(a != NULL);
+       vpdev = to_virtiopcidev(vdev);
+
+       /* Select the queue and deactivate it by clearing its PFN register */
+       virtio_cwrite16((void *)(unsigned long)vpdev->pci_base_addr,
+                       VIRTIO_PCI_QUEUE_SEL, vq->queue_id);
+       virtio_cwrite32((void *)(unsigned long)vpdev->pci_base_addr,
+                       VIRTIO_PCI_QUEUE_PFN, 0);
+
+       flags = ukplat_lcpu_save_irqf();
+       UK_TAILQ_REMOVE(&vpdev->vdev.vqs, vq, next);
+       ukplat_lcpu_restore_irqf(flags);
+
+       virtqueue_destroy(vq, a);
+}
+
 static int vpci_legacy_pci_vq_find(struct virtio_dev *vdev, __u16 num_vqs,
                                   __u16 *qdesc_size)
 {
-- 
2.17.1
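
For drivers that own several queues, note that vq_release unlinks the
queue from the device's vqs list; a teardown loop should therefore
re-read the list head on each pass rather than walk with a cursor. A
minimal sketch (helper name hypothetical; assumes the UK_TAILQ_EMPTY
and UK_TAILQ_FIRST accessors that accompany the UK_TAILQ_REMOVE macro
used above):

#include <uk/alloc.h>
#include <virtio/virtio_bus.h>

static void mydrv_release_all_vqs(struct virtio_dev *vdev,
				  struct uk_alloc *a)
{
	struct virtqueue *vq;

	/* virtio_vqueue_release() removes vq from vdev->vqs, so take
	 * the current head on every iteration; a foreach cursor would
	 * be invalidated by the removal.
	 */
	while (!UK_TAILQ_EMPTY(&vdev->vqs)) {
		vq = UK_TAILQ_FIRST(&vdev->vqs);
		virtio_vqueue_release(vdev, vq, a);
	}
}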

