[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Minios-devel] [UNIKRAFT PATCH v2 6/7] plat/drivers: Interrupt handling for virtio-ring



This patch introduces the API for handling interrupts on the virtio PCI
device. It provides a handler for the virtqueue rings. The handler identifies
the virtio device owning the virtqueue and invokes the callback handler
configured by the virtio device.

Signed-off-by: Sharan Santhanam <sharan.santhanam@xxxxxxxxx>
---
 plat/drivers/include/virtio/virtqueue.h | 54 ++++++++++++++++++++++
 plat/drivers/virtio/virtio_pci.c        | 50 +++++++++++++++++++-
 plat/drivers/virtio/virtio_ring.c       | 82 +++++++++++++++++++++++++++++++++
 3 files changed, 184 insertions(+), 2 deletions(-)

diff --git a/plat/drivers/include/virtio/virtqueue.h b/plat/drivers/include/virtio/virtqueue.h
index a8a4bc0..27b2763 100644
--- a/plat/drivers/include/virtio/virtqueue.h
+++ b/plat/drivers/include/virtio/virtqueue.h
@@ -83,6 +83,19 @@ struct virtqueue {
 __phys_addr virtqueue_physaddr(struct virtqueue *vq);
 
 /**
+ * Ring interrupt handler. This function is invoked from the interrupt handler
+ * in the virtio device for interrupt specific to the ring.
+ *
+ * @param obj
+ *     Reference to the virtqueue.
+ *
+ * @return int
+ *     0, Interrupt was not for this virtqueue.
+ *     1, Virtqueue has handled the interrupt.
+ */
+int virtqueue_ring_interrupt(void *obj);
+
+/**
  * Negotiate with the virtqueue features.
  * @param feature_set
  *     The feature set the device request.
@@ -93,6 +106,17 @@ __phys_addr virtqueue_physaddr(struct virtqueue *vq);
 __u64 virtqueue_feature_negotiate(__u64 feature_set);
 
 /**
+ * Check if host notification is enabled.
+ *
+ * @param vq
+ *     Reference to the virtqueue.
+ * @return
+ *     Returns 1, host needs notification on new descriptors.
+ *             0, otherwise.
+ */
+int virtqueue_notify_enabled(struct virtqueue *vq);
+
+/**
  * Allocate a virtqueue.
  * @param queue_id
  *     The virtqueue hw id.
@@ -138,6 +162,36 @@ int virtqueue_is_full(struct virtqueue *vq);
  */
 void virtqueue_destroy(struct virtqueue *vq, struct uk_alloc *a);
 
+/**
+ * Disable interrupts on the virtqueue.
+ * @param vq
+ *      Reference to the virtqueue.
+ */
+void virtqueue_intr_disable(struct virtqueue *vq);
+
+/**
+ * Enable interrupts on the virtqueue.
+ * @param vq
+ *      Reference to the virtqueue
+ * @return
+ *     0, On successful enabling of interrupt.
+ *     1, More packets in the ring to be processed.
+ */
+int virtqueue_intr_enable(struct virtqueue *vq);
+
+/**
+ * Notify the host of an event.
+ * @param vq
+ *      Reference to the virtual queue.
+ */
+static inline void virtqueue_host_notify(struct virtqueue *vq)
+{
+       UK_ASSERT(vq);
+
+       if (vq->vq_notify_host && virtqueue_notify_enabled(vq))
+               vq->vq_notify_host(vq->vdev, vq->queue_id);
+}
+
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */
diff --git a/plat/drivers/virtio/virtio_pci.c b/plat/drivers/virtio/virtio_pci.c
index 4e29e62..8503b53 100644
--- a/plat/drivers/virtio/virtio_pci.c
+++ b/plat/drivers/virtio/virtio_pci.c
@@ -96,6 +96,8 @@ static struct virtqueue *vpci_legacy_vq_setup(struct virtio_dev *vdev,
 static inline void virtio_device_id_add(struct virtio_dev *vdev,
                                        __u16 pci_dev_id,
                                        __u16 vpci_dev_id_start);
+static int virtio_pci_handle(void *arg);
+static int vpci_legacy_notify(struct virtio_dev *vdev, __u16 queue_id);
 static int virtio_pci_legacy_add_dev(struct pci_device *pci_dev,
                                     struct virtio_pci_dev *vpci_dev);
 
@@ -114,6 +116,43 @@ static struct virtio_config_ops vpci_legacy_ops = {
        .vq_setup     = vpci_legacy_vq_setup,
 };
 
+static int vpci_legacy_notify(struct virtio_dev *vdev, __u16 queue_id)
+{
+       struct virtio_pci_dev *vpdev;
+
+       UK_ASSERT(vdev);
+       vpdev = to_virtiopcidev(vdev);
+       virtio_cwrite16((void *)(unsigned long) vpdev->pci_base_addr,
+                       VIRTIO_PCI_QUEUE_NOTIFY, queue_id);
+
+       return 0;
+}
+
+static int virtio_pci_handle(void *arg)
+{
+       struct virtio_pci_dev *d = (struct virtio_pci_dev *) arg;
+       uint8_t isr_status;
+       struct virtqueue *vq;
+       int rc = 0;
+
+       UK_ASSERT(arg);
+
+       /* Reading the isr status is used to acknowledge the interrupt */
+       isr_status = virtio_cread8((void *)(unsigned long)d->pci_isr_addr, 0);
+       /* We don't support configuration interrupt on the device */
+       if (isr_status & VIRTIO_PCI_ISR_CONFIG) {
+               uk_pr_warn("Unsupported config change interrupt received on virtio-pci device %p\n",
+                          d);
+       }
+
+       if (isr_status & VIRTIO_PCI_ISR_HAS_INTR) {
+               UK_TAILQ_FOREACH(vq, &d->vdev.vqs, next) {
+                       rc |= virtqueue_ring_interrupt(vq);
+               }
+       }
+       return rc;
+}
+
 static struct virtqueue *vpci_legacy_vq_setup(struct virtio_dev *vdev,
                                              __u16 queue_id,
                                              __u16 num_desc,
@@ -129,7 +168,7 @@ static struct virtqueue *vpci_legacy_vq_setup(struct virtio_dev *vdev,
 
        vpdev = to_virtiopcidev(vdev);
        vq = virtqueue_create(queue_id, num_desc, VIRTIO_PCI_VRING_ALIGN,
-                             callback, NULL, vdev, a);
+                             callback, vpci_legacy_notify, vdev, a);
        if (PTRISERR(vq)) {
                uk_pr_err("Failed to create the virtqueue: %d\n",
                          PTR2ERR(vq));
@@ -157,11 +196,18 @@ static int vpci_legacy_pci_vq_find(struct virtio_dev *vdev, __u16 num_vqs,
                                   __u16 *qdesc_size)
 {
        struct virtio_pci_dev *vpdev = NULL;
-       int vq_cnt = 0, i = 0;
+       int vq_cnt = 0, i = 0, rc = 0;
 
        UK_ASSERT(vdev);
        vpdev = to_virtiopcidev(vdev);
 
+       /* Registering the interrupt for the queue */
+       rc = ukplat_irq_register(vpdev->pdev->irq, virtio_pci_handle, vpdev);
+       if (rc != 0) {
+               uk_pr_err("Failed to register the interrupt\n");
+               return rc;
+       }
+
        for (i = 0; i < num_vqs; i++) {
                virtio_cwrite16((void *) (unsigned long)vpdev->pci_base_addr,
                                VIRTIO_PCI_QUEUE_SEL, i);
diff --git a/plat/drivers/virtio/virtio_ring.c b/plat/drivers/virtio/virtio_ring.c
index ba91594..6012bd2 100644
--- a/plat/drivers/virtio/virtio_ring.c
+++ b/plat/drivers/virtio/virtio_ring.c
@@ -74,9 +74,74 @@ struct virtqueue_vring {
 /**
  * Static function Declaration(s).
  */
+static inline int virtqueue_hasdata(struct virtqueue_vring *vrq);
 static void virtqueue_vring_init(struct virtqueue_vring *vrq, __u16 nr_desc,
                                 __u16 align);
 
+void virtqueue_intr_disable(struct virtqueue *vq)
+{
+       struct virtqueue_vring *vrq;
+
+       UK_ASSERT(vq);
+
+       vrq = to_virtqueue_vring(vq);
+       vrq->vring.avail->flags |= (VRING_AVAIL_F_NO_INTERRUPT);
+}
+
+int virtqueue_intr_enable(struct virtqueue *vq)
+{
+       struct virtqueue_vring *vrq;
+       int rc = 0;
+
+       UK_ASSERT(vq);
+
+       vrq = to_virtqueue_vring(vq);
+       /* Only re-enable interrupts if no used descriptors are pending */
+       if (!virtqueue_hasdata(vrq)) {
+               if (vrq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
+                       vrq->vring.avail->flags &=
+                               (~VRING_AVAIL_F_NO_INTERRUPT);
+                       /**
+                        * We enabled the interrupts. We ensure it using the
+                        * memory barrier and check if there are any further
+                        * data available in the queue. The check for data
+                        * after enabling the interrupt is to make sure we do
+                        * not miss any interrupt while transitioning to enable
+                        * interrupt. This is inline with the requirement from
+                        * virtio specification section 3.2.2
+                        */
+                       mb();
+                       /* Check if there are further descriptors */
+                       if (virtqueue_hasdata(vrq)) {
+                               virtqueue_intr_disable(vq);
+                               rc = 1;
+                       }
+               }
+       } else {
+               /**
+                * There are more packets in the virtqueue to be processed while
+                * the interrupt was disabled.
+                */
+               rc = 1;
+       }
+       return rc;
+}
+
+int virtqueue_notify_enabled(struct virtqueue *vq)
+{
+       struct virtqueue_vring *vrq;
+
+       UK_ASSERT(vq);
+       vrq = to_virtqueue_vring(vq);
+
+       return ((vrq->vring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
+}
+
+static inline int virtqueue_hasdata(struct virtqueue_vring *vrq)
+{
+       return (vrq->last_used_desc_idx != vrq->vring.used->idx);
+}
+
 /**
  * Driver implementation
  */
@@ -92,6 +157,23 @@ __u64 virtqueue_feature_negotiate(__u64 feature_set)
        return feature;
 }
 
+int virtqueue_ring_interrupt(void *obj)
+{
+       struct virtqueue_vring *vrq = NULL;
+       struct virtqueue *vq = (struct virtqueue *)obj;
+       int rc = 0;
+
+       UK_ASSERT(vq);
+
+       vrq = to_virtqueue_vring(vq);
+       if (!virtqueue_hasdata(vrq))
+               return rc;
+
+       if (likely(vq->vq_callback))
+               rc = vq->vq_callback(vq, vq->priv);
+       return rc;
+}
+
 __phys_addr virtqueue_physaddr(struct virtqueue *vq)
 {
        struct virtqueue_vring *vrq = NULL;
-- 
2.7.4


_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.