
[Minios-devel] [UNIKRAFT PATCH 5/7] plat/drivers: Reintroduce the virtio ring



This patch introduces the API to create and destroy the virtio ring,
along with the configuration API for finding the number of queues on a
device and setting up its individual queues.
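
As an illustration only (this sketch is not part of the patch), a
device driver is expected to use the new configuration API roughly as
follows; vdev, a and drv_callback are hypothetical driver-side names,
and <virtio/virtio.h> plus <uk/errptr.h> are assumed to be included:

        /* Illustrative only: vdev, a and drv_callback come from the driver */
        __u16 qdesc_size[1];
        struct virtqueue *vq;
        int rc;

        /* Query the number of virtqueues offered by the device and the
         * maximum descriptor count of each queue.
         */
        rc = virtio_find_vqs(vdev, 1, qdesc_size);
        if (rc < 0)
                return rc;

        /* Set up queue 0 with the advertised descriptor count; the
         * callback is invoked on an interrupt from the virtqueue.
         */
        vq = virtio_vqueue_setup(vdev, 0, qdesc_size[0], drv_callback, a);
        if (PTRISERR(vq))
                return PTR2ERR(vq);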

Signed-off-by: Sharan Santhanam <sharan.santhanam@xxxxxxxxx>
---
 plat/drivers/include/virtio/virtio.h    | 138 +++++++++++++++++++---
 plat/drivers/include/virtio/virtqueue.h | 145 +++++++++++++++++++++++
 plat/drivers/virtio/virtio_bus.c        |   6 +-
 plat/drivers/virtio/virtio_pci.c        | 147 ++++++++++++++++++------
 plat/drivers/virtio/virtio_ring.c       | 196 ++++++++++++++++++++++++++++++++
 plat/kvm/Config.uk                      |   1 +
 plat/kvm/Makefile.uk                    |   2 +
 7 files changed, 579 insertions(+), 56 deletions(-)
 create mode 100644 plat/drivers/include/virtio/virtqueue.h
 create mode 100644 plat/drivers/virtio/virtio_ring.c

diff --git a/plat/drivers/include/virtio/virtio.h b/plat/drivers/include/virtio/virtio.h
index b01faf6..1dfa495 100644
--- a/plat/drivers/include/virtio/virtio.h
+++ b/plat/drivers/include/virtio/virtio.h
@@ -42,6 +42,7 @@
 #include <uk/arch/lcpu.h>
 #include <uk/alloc.h>
 #include <virtio/virtio_config.h>
+#include <virtio/virtqueue.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -80,20 +81,27 @@ struct virtio_dev_id {
  */
 struct virtio_config_ops {
        /** Resetting the device */
-       void (*reset_device)(struct virtio_dev *vdev);
+       void (*device_reset)(struct virtio_dev *vdev);
        /** Set configuration option */
-       int (*set_config)(struct virtio_dev *vdev, __u16 offset,
-                       const void *buf, __u32 len);
+       int (*config_set)(struct virtio_dev *vdev, __u16 offset,
+                         const void *buf, __u32 len);
        /** Get configuration option */
-       int (*get_config)(struct virtio_dev *vdev, __u16 offset, void *buf,
-                       __u32 len, __u8 type_len);
+       int (*config_get)(struct virtio_dev *vdev, __u16 offset, void *buf,
+                         __u32 len, __u8 type_len);
        /** Get the feature */
-       __u64 (*get_features)(struct virtio_dev *vdev);
+       __u64 (*features_get)(struct virtio_dev *vdev);
        /** Set the feature */
-       void (*set_features)(struct virtio_dev *vdev, __u64 features);
+       void (*features_set)(struct virtio_dev *vdev, __u64 features);
        /** Get and Set Status */
-       __u8 (*get_status)(struct virtio_dev *vdev);
-       void (*set_status)(struct virtio_dev *vdev, __u8 status);
+       __u8 (*status_get)(struct virtio_dev *vdev);
+       void (*status_set)(struct virtio_dev *vdev, __u8 status);
+       /** Find the virtqueue */
+       int (*vqs_find)(struct virtio_dev *vdev, __u16 num_vq, __u16 *vq_size);
+       /** Setup the virtqueue */
+       struct virtqueue *(*vq_setup)(struct virtio_dev *vdev, __u16 queue_id,
+                                     __u16 num_desc,
+                                     virtqueue_callback_t callback,
+                                     struct uk_alloc *a);
 };
 
 /**
@@ -116,6 +124,8 @@ struct virtio_driver {
 struct virtio_dev {
        /* Feature bit describing the virtio device */
        __u64 features;
+       /* List of the virtqueue for the device */
+       UK_TAILQ_HEAD(virtqueue_head, struct virtqueue) vqs;
        /* Private data of the driver */
        void *priv;
        /* Virtio device identifier */
@@ -142,6 +152,7 @@ void _virtio_register_driver(struct virtio_driver *vdrv);
  *      The status to update.
  * @return
  *      0 on successful updating the status.
+ *      -ENOTSUP, if the operation is not supported on the virtio device.
  */
 static inline int virtio_dev_status_update(struct virtio_dev *vdev, __u8 status)
 {
@@ -149,32 +160,63 @@ static inline int virtio_dev_status_update(struct virtio_dev *vdev, __u8 status)
 
        UK_ASSERT(vdev);
 
-       if (likely(vdev->cops->set_status)) {
-               vdev->cops->set_status(vdev, status);
+       if (likely(vdev->cops->status_set)) {
+               vdev->cops->status_set(vdev, status);
                rc = 0;
        }
        return rc;
 }
 
+/**
+ * The function to get the features supported by the device.
+ * @param vdev
+ *     Reference to the virtio device.
+ *
+ * @return __u64
+ *     A bitmap of the features supported by the device.
+ */
 static inline __u64 virtio_feature_get(struct virtio_dev *vdev)
 {
        __u64 features = 0;
 
        UK_ASSERT(vdev);
 
-       if (likely(vdev->cops->get_features))
-               features = vdev->cops->get_features(vdev);
+       if (likely(vdev->cops->features_get))
+               features = vdev->cops->features_get(vdev);
        return features;
 }
 
+/**
+ * The function to set the negotiated features.
+ * @param vdev
+ *     Reference to the virtio device.
+ * @param feature
+ *     A bitmap of the negotiated features.
+ */
 static inline void virtio_feature_set(struct virtio_dev *vdev, __u32 feature)
 {
        UK_ASSERT(vdev);
 
-       if (likely(vdev->cops->set_features))
-               vdev->cops->set_features(vdev, feature);
+       if (likely(vdev->cops->features_set))
+               vdev->cops->features_set(vdev, feature);
 }
 
+/**
+ * Get the configuration information from the virtio device.
+ * @param vdev
+ *     Reference to the virtio device.
+ * @param offset
+ *     Offset into the virtio device configuration space.
+ * @param buf
+ *     A buffer to store the configuration information.
+ * @param len
+ *     The length of the buffer.
+ * @param type_len
+ *     The data type of the configuration data.
+ * @return int
+ *     0, on successfully reading the configuration space.
+ *     < 0, on error.
+ */
 static inline int virtio_config_get(struct virtio_dev *vdev, __u16 offset,
                                    void *buf, __u32 len, __u8 type_len)
 {
@@ -182,12 +224,74 @@ static inline int virtio_config_get(struct virtio_dev *vdev, __u16 offset,
 
        UK_ASSERT(vdev);
 
-       if (likely(vdev->cops->get_config))
-               rc = vdev->cops->get_config(vdev, offset, buf, len, type_len);
+       if (likely(vdev->cops->config_get))
+               rc = vdev->cops->config_get(vdev, offset, buf, len, type_len);
+
+       return rc;
+}
+
+/**
+ * A helper function to find the number of virtqueues supported by the device.
+ * @param vdev
+ *     A reference to the virtio device.
+ * @param total_vqs
+ *     The total number of virtqueues requested.
+ * @param vq_size
+ *     An array of max descriptors on each virtqueue found on the
+ *     virtio device
+ * @return int
+ *     On success, the function returns the number of available virtqueues.
+ *     On error,
+ *             -ENOTSUP if the function is not supported.
+ */
+static inline int virtio_find_vqs(struct virtio_dev *vdev, __u16 total_vqs,
+                                 __u16 *vq_size)
+{
+       int rc = -ENOTSUP;
+
+       UK_ASSERT(vdev);
+
+       if (likely(vdev->cops->vqs_find))
+               rc = vdev->cops->vqs_find(vdev, total_vqs, vq_size);
 
        return rc;
 }
 
+/**
+ * A helper function to setup an individual virtqueue.
+ * @param vdev
+ *     Reference to the virtio device.
+ * @param vq_id
+ *     The virtqueue queue id.
+ * @param nr_desc
+ *     The number of descriptors to be configured.
+ * @param callback
+ *     A reference to the callback function to be invoked by the virtio
+ *     device on an interrupt from the virtqueue.
+ * @param a
+ *     A reference to the allocator.
+ *
+ * @return struct virtqueue *
+ *     On success, a reference to the virtqueue.
+ *     On error,
+ *             -ENOTSUP operation not supported on the device.
+ *             -ENOMEM  Failed allocating the virtqueue.
+ */
+static inline struct virtqueue *virtio_vqueue_setup(struct virtio_dev *vdev,
+                                           __u16 vq_id, __u16 nr_desc,
+                                           virtqueue_callback_t  callback,
+                                           struct uk_alloc *a)
+{
+       struct virtqueue *vq = ERR2PTR(-ENOTSUP);
+
+       UK_ASSERT(vdev && a);
+
+       if (likely(vdev->cops->vq_setup))
+               vq = vdev->cops->vq_setup(vdev, vq_id, nr_desc, callback, a);
+
+       return vq;
+}
+
 static inline int virtio_has_features(__u64 features, __u8 bpos)
 {
        __u64 tmp_feature = 0;
diff --git a/plat/drivers/include/virtio/virtqueue.h b/plat/drivers/include/virtio/virtqueue.h
new file mode 100644
index 0000000..a8a4bc0
--- /dev/null
+++ b/plat/drivers/include/virtio/virtqueue.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Authors: Sharan Santhanam <sharan.santhanam@xxxxxxxxx>
+ *
+ * Copyright (c) 2018, NEC Europe Ltd., NEC Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
+ */
+
+#ifndef __PLAT_DRV_VIRTQUEUE_H__
+#define __PLAT_DRV_VIRTQUEUE_H__
+
+#include <uk/config.h>
+#include <uk/list.h>
+#include <uk/sglist.h>
+#include <uk/arch/types.h>
+#include <virtio/virtio_ring.h>
+#include <virtio/virtio_config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * Type declarations
+ */
+struct virtqueue;
+struct virtio_dev;
+typedef int (*virtqueue_callback_t)(struct virtqueue *, void *priv);
+typedef int (*virtqueue_notify_host_t)(struct virtio_dev *, __u16 queue_nr);
+
+/**
+ * Structure to describe the virtqueue.
+ */
+struct virtqueue {
+       /* Reference to the virtio_dev it belongs to */
+       struct virtio_dev *vdev;
+       /* Virtqueue identifier */
+       __u16 queue_id;
+       /* Notify to the host */
+       virtqueue_notify_host_t vq_notify_host;
+       /* Callback from the virtqueue */
+       virtqueue_callback_t vq_callback;
+       /* Next entry of the queue */
+       UK_TAILQ_ENTRY(struct virtqueue) next;
+       /* Private data structure used by the driver of the queue */
+       void *priv;
+};
+
+/**
+ * Fetch the physical address of the descriptor ring.
+ * @param vq
+ *     Reference to the virtqueue.
+ *
+ * @return
+ *     Return the guest physical address of the vring.
+ */
+__phys_addr virtqueue_physaddr(struct virtqueue *vq);
+
+/**
+ * Negotiate the features with the virtqueue driver.
+ * @param feature_set
+ *     The feature set the device requests.
+ *
+ * @return __u64
+ *     The negotiated feature set.
+ */
+__u64 virtqueue_feature_negotiate(__u64 feature_set);
+
+/**
+ * Allocate a virtqueue.
+ * @param queue_id
+ *     The virtqueue hw id.
+ * @param nr_descs
+ *     The number of descriptors for the queue.
+ * @param align
+ *     The memory alignment for the ring memory.
+ * @param callback
+ *     A reference to the callback for the virtio device.
+ * @param notify
+ *     A reference to the function used to notify the host.
+ * @param vdev
+ *     A reference to the virtio device.
+ * @param a
+ *     A reference to the allocator.
+ *
+ * @return struct virtqueue *
+ *     On success, return a reference to the virtqueue.
+ *     On failure,
+ *                -ENOMEM: Failed to allocate the queue.
+ */
+struct virtqueue *virtqueue_create(__u16 queue_id, __u16 nr_descs, __u16 align,
+                                  virtqueue_callback_t callback,
+                                  virtqueue_notify_host_t notify,
+                                  struct virtio_dev *vdev, struct uk_alloc *a);
+
+/**
+ * Check whether the virtqueue is full.
+ * @param vq
+ *     A reference to the virtqueue.
+ * @return int
+ *     1 on full,
+ *     0 otherwise
+ */
+int virtqueue_is_full(struct virtqueue *vq);
+
+/*
+ * Destroy a virtual queue
+ * @param vq
+ *     A reference to the virtual queue
+ * @param a
+ *     Reference to the memory allocator
+ */
+void virtqueue_destroy(struct virtqueue *vq, struct uk_alloc *a);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __PLAT_DRV_VIRTQUEUE_H__ */
diff --git a/plat/drivers/virtio/virtio_bus.c b/plat/drivers/virtio/virtio_bus.c
index ef66ae8..b0ab9c8 100644
--- a/plat/drivers/virtio/virtio_bus.c
+++ b/plat/drivers/virtio/virtio_bus.c
@@ -92,8 +92,8 @@ static int virtio_device_reinit(struct virtio_dev *vdev)
         * This may not be necessary while initializing the device for the first
         * time.
         */
-       if (vdev->cops->reset_device) {
-               vdev->cops->reset_device(vdev);
+       if (vdev->cops->device_reset) {
+               vdev->cops->device_reset(vdev);
                /* Set the device status */
                vdev->status = VIRTIO_DEV_RESET;
        }
@@ -140,6 +140,8 @@ int virtio_register_device(struct virtio_dev *vdev)
                return rc;
        }
 
+       /* Initialize the virtqueue list */
+       UK_TAILQ_INIT(&vdev->vqs);
 
        /* Calling the driver add device */
        rc = drv->add_dev(vdev);
diff --git a/plat/drivers/virtio/virtio_pci.c b/plat/drivers/virtio/virtio_pci.c
index ab2f2c8..d07b011 100644
--- a/plat/drivers/virtio/virtio_pci.c
+++ b/plat/drivers/virtio/virtio_pci.c
@@ -35,16 +35,13 @@
 #include <uk/config.h>
 #include <uk/arch/types.h>
 #include <errno.h>
-#ifdef CONFIG_LIBUKALLOC
 #include <uk/alloc.h>
-#else
-#error "Virtio PCI device requires UKALLOC"
-#endif /* CONFIG_LIBUKALLOC */
 #include <uk/print.h>
 #include <uk/plat/lcpu.h>
 #include <uk/plat/irq.h>
 #include <pci/pci_bus.h>
 #include <virtio/virtio_config.h>
+#include <virtio/virtqueue.h>
 #include <virtio/virtio.h>
 #include <virtio/virtio_pci.h>
 
@@ -69,44 +66,118 @@ struct virtio_pci_dev {
 };
 
 /**
- * Fetch the virtio pci information from the virtiodev.
+ * Fetch the virtio pci information from the virtio device.
+ * @param vdev
+ *     Reference to the virtio device.
  */
 #define to_virtiopcidev(vdev) \
                __containerof(vdev, struct virtio_pci_dev, vdev)
 
-
 /**
  * Static function declaration.
  */
 static void vpci_legacy_pci_dev_reset(struct virtio_dev *vdev);
 static int vpci_legacy_pci_config_set(struct virtio_dev *vdev, __u16 offset,
-                               const void *buf, __u32 len);
+                                     const void *buf, __u32 len);
 static int vpci_legacy_pci_config_get(struct virtio_dev *vdev, __u16 offset,
-                                       void *buf, __u32 len,
-                                       __u8 type_len __maybe_unused);
+                                     void *buf, __u32 len,
+                                     __u8 type_len __maybe_unused);
 static __u64 vpci_legacy_pci_features_get(struct virtio_dev *vdev);
 static void vpci_legacy_pci_features_set(struct virtio_dev *vdev,
-                                               __u64 features);
+                                        __u64 features);
+static int vpci_legacy_pci_vq_find(struct virtio_dev *vdev, __u16 num_vq,
+                                  __u16 *qdesc_size);
 static void vpci_legacy_pci_status_set(struct virtio_dev *vdev, __u8 status);
 static __u8 vpci_legacy_pci_status_get(struct virtio_dev *vdev);
+static struct virtqueue *vpci_legacy_vq_setup(struct virtio_dev *vdev,
+                                             __u16 queue_id,
+                                             __u16 num_desc,
+                                             virtqueue_callback_t callback,
+                                             struct uk_alloc *a);
 static inline void virtio_device_id_add(struct virtio_dev *vdev,
-                               __u16 pci_dev_id, __u16 vpci_dev_id_start);
+                                       __u16 pci_dev_id,
+                                       __u16 vpci_dev_id_start);
 static int virtio_pci_legacy_add_dev(struct pci_device *pci_dev,
-               struct virtio_pci_dev *vpci_dev);
+                                    struct virtio_pci_dev *vpci_dev);
 
 /**
  * Configuration operations legacy PCI device.
  */
 static struct virtio_config_ops vpci_legacy_ops = {
-       .reset_device = vpci_legacy_pci_dev_reset,
-       .set_config   = vpci_legacy_pci_config_set,
-       .get_config   = vpci_legacy_pci_config_get,
-       .get_features = vpci_legacy_pci_features_get,
-       .set_features = vpci_legacy_pci_features_set,
-       .get_status   = vpci_legacy_pci_status_get,
-       .set_status   = vpci_legacy_pci_status_set,
+       .device_reset = vpci_legacy_pci_dev_reset,
+       .config_get   = vpci_legacy_pci_config_get,
+       .config_set   = vpci_legacy_pci_config_set,
+       .features_get = vpci_legacy_pci_features_get,
+       .features_set = vpci_legacy_pci_features_set,
+       .status_get   = vpci_legacy_pci_status_get,
+       .status_set   = vpci_legacy_pci_status_set,
+       .vqs_find     = vpci_legacy_pci_vq_find,
+       .vq_setup     = vpci_legacy_vq_setup,
 };
 
+static struct virtqueue *vpci_legacy_vq_setup(struct virtio_dev *vdev,
+                                             __u16 queue_id,
+                                             __u16 num_desc,
+                                             virtqueue_callback_t callback,
+                                             struct uk_alloc *a)
+{
+       struct virtio_pci_dev *vpdev = NULL;
+       struct virtqueue *vq;
+       __phys_addr addr;
+       unsigned long flags;
+
+       UK_ASSERT(vdev != NULL);
+
+       vpdev = to_virtiopcidev(vdev);
+       vq = virtqueue_create(queue_id, num_desc, VIRTIO_PCI_VRING_ALIGN,
+                             callback, NULL, vdev, a);
+       if (PTRISERR(vq)) {
+               uk_pr_err("Failed to create the virtqueue: %d\n",
+                               PTR2ERR(vq));
+               goto err_exit;
+       }
+
+       /* Physical address of the queue */
+       addr = virtqueue_physaddr(vq);
+       /* Select the queue of interest */
+       virtio_cwrite16((void *)(unsigned long)vpdev->pci_base_addr,
+                       VIRTIO_PCI_QUEUE_SEL, queue_id);
+       virtio_cwrite32((void *)(unsigned long)vpdev->pci_base_addr,
+                       VIRTIO_PCI_QUEUE_PFN,
+                       addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
+
+       flags = ukplat_lcpu_save_irqf();
+       UK_TAILQ_INSERT_TAIL(&vpdev->vdev.vqs, vq, next);
+       ukplat_lcpu_restore_irqf(flags);
+
+err_exit:
+       return vq;
+}
+
+static int vpci_legacy_pci_vq_find(struct virtio_dev *vdev, __u16 num_vqs,
+                                  __u16 *qdesc_size)
+{
+       struct virtio_pci_dev *vpdev = NULL;
+       int vq_cnt = 0, i = 0;
+
+       UK_ASSERT(vdev);
+       vpdev = to_virtiopcidev(vdev);
+
+       for (i = 0; i < num_vqs; i++) {
+               virtio_cwrite16((void *) (unsigned long)vpdev->pci_base_addr,
+                               VIRTIO_PCI_QUEUE_SEL, i);
+               qdesc_size[i] = virtio_cread16(
+                               (void *) (unsigned long)vpdev->pci_base_addr,
+                               VIRTIO_PCI_QUEUE_SIZE);
+               if (unlikely(!qdesc_size[i])) {
+                       uk_pr_err("Virtqueue %d not available\n", i);
+                       continue;
+               }
+               vq_cnt++;
+       }
+       return vq_cnt;
+}
+
 static int vpci_legacy_pci_config_set(struct virtio_dev *vdev, __u16 offset,
                                const void *buf, __u32 len)
 {
@@ -116,14 +187,14 @@ static int vpci_legacy_pci_config_set(struct virtio_dev *vdev, __u16 offset,
        vpdev = to_virtiopcidev(vdev);
 
        _virtio_cwrite_bytes((void *)(unsigned long)vpdev->pci_base_addr,
-                       VIRTIO_PCI_CONFIG_OFF + offset, buf, len, 1);
+                            VIRTIO_PCI_CONFIG_OFF + offset, buf, len, 1);
 
        return 0;
 }
 
 static int vpci_legacy_pci_config_get(struct virtio_dev *vdev, __u16 offset,
-                                       void *buf, __u32 len,
-                                       __u8 type_len __maybe_unused)
+                                     void *buf, __u32 len,
+                                     __u8 type_len __maybe_unused)
 {
        struct virtio_pci_dev *vpdev = NULL;
        int rc = 0;
@@ -152,7 +223,7 @@ static __u8 vpci_legacy_pci_status_get(struct virtio_dev *vdev)
        UK_ASSERT(vdev);
        vpdev = to_virtiopcidev(vdev);
        return virtio_cread8((void *) (unsigned long) vpdev->pci_base_addr,
-                       VIRTIO_PCI_STATUS);
+                            VIRTIO_PCI_STATUS);
 }
 
 static void vpci_legacy_pci_status_set(struct virtio_dev *vdev, __u8 status)
@@ -167,7 +238,7 @@ static void vpci_legacy_pci_status_set(struct virtio_dev *vdev, __u8 status)
        curr_status = vpci_legacy_pci_status_get(vdev);
        status |= curr_status;
        virtio_cwrite8((void *)(unsigned long) vpdev->pci_base_addr,
-                        VIRTIO_PCI_STATUS, status);
+                      VIRTIO_PCI_STATUS, status);
 }
 
 static void vpci_legacy_pci_dev_reset(struct virtio_dev *vdev)
@@ -182,7 +253,7 @@ static void vpci_legacy_pci_dev_reset(struct virtio_dev *vdev)
         * Resetting the device.
         */
        virtio_cwrite8((void *) (unsigned long)vpdev->pci_base_addr,
-                       VIRTIO_PCI_STATUS, VIRTIO_CONFIG_STATUS_RESET);
+                      VIRTIO_PCI_STATUS, VIRTIO_CONFIG_STATUS_RESET);
        /**
         * Waiting for the resetting the device. Find a better way
         * of doing this instead of repeating register read.
@@ -206,19 +277,21 @@ static __u64 vpci_legacy_pci_features_get(struct virtio_dev *vdev)
 
        vpdev = to_virtiopcidev(vdev);
        features = virtio_cread32((void *) (unsigned long)vpdev->pci_base_addr,
-                               VIRTIO_PCI_HOST_FEATURES);
+                                 VIRTIO_PCI_HOST_FEATURES);
        return features;
 }
 
 static void vpci_legacy_pci_features_set(struct virtio_dev *vdev,
-                                               __u64 features)
+                                        __u64 features)
 {
        struct virtio_pci_dev *vpdev = NULL;
 
        UK_ASSERT(vdev);
        vpdev = to_virtiopcidev(vdev);
+       /* Mask out features not supported by the virtqueue driver */
+       features = virtqueue_feature_negotiate(features);
        virtio_cwrite32((void *) (unsigned long)vpdev->pci_base_addr,
-                VIRTIO_PCI_GUEST_FEATURES, (__u32)features);
+                       VIRTIO_PCI_GUEST_FEATURES, (__u32)features);
 }
 
 static inline void virtio_device_id_add(struct virtio_dev *vdev,
@@ -232,8 +305,8 @@ static int virtio_pci_legacy_add_dev(struct pci_device *pci_dev,
 {
        /* Check the valid range of the virtio legacy device */
        if (pci_dev->id.device_id < 0x1000 || pci_dev->id.device_id > 0x103f) {
-               uk_pr_err("Error: Invalid Virtio Devices %d\n",
-                               pci_dev->id.device_id);
+               uk_pr_err("Invalid Virtio Devices %04x\n",
+                         pci_dev->id.device_id);
                return -EINVAL;
        }
 
@@ -242,12 +315,12 @@ static int virtio_pci_legacy_add_dev(struct pci_device *pci_dev,
        /* Setting the configuration operation */
        vpci_dev->vdev.cops = &vpci_legacy_ops;
 
-       uk_pr_info("Added a virtio-pci device(%02x)\n",
-                       (int) pci_dev->id.device_id);
+       uk_pr_info("Added virtio-pci device %04x\n",
+                  pci_dev->id.device_id);
 
        /* Mapping the virtio device identifier */
        virtio_device_id_add(&vpci_dev->vdev, pci_dev->id.device_id,
-                       VIRTIO_PCI_LEGACY_DEVICEID_START);
+                            VIRTIO_PCI_LEGACY_DEVICEID_START);
        return 0;
 }
 
@@ -261,7 +334,7 @@ static int virtio_pci_add_dev(struct pci_device *pci_dev)
 
        vpci_dev = uk_malloc(a, sizeof(*vpci_dev));
        if (!vpci_dev) {
-               uk_pr_err("Error in memory allocation of pci_device\n");
+               uk_pr_err("Failed to allocate virtio-pci device\n");
                return -ENOMEM;
        }
 
@@ -276,19 +349,19 @@ static int virtio_pci_add_dev(struct pci_device *pci_dev)
         */
        rc = virtio_pci_legacy_add_dev(pci_dev, vpci_dev);
        if (rc != 0) {
-               uk_pr_err("Failed(%d) to probe the legacy device\n",
-                               rc);
+               uk_pr_err("Failed to probe (legacy) pci device: %d\n", rc);
                goto free_pci_dev;
        }
 
        rc = virtio_register_device(&vpci_dev->vdev);
        if (rc != 0) {
-               uk_pr_err("Failed(%d) to register the virtio device\n", rc);
+               uk_pr_err("Failed to register the virtio device: %d\n", rc);
                goto free_pci_dev;
        }
 
 exit:
        return rc;
+
 free_pci_dev:
        uk_free(a, vpci_dev);
        goto exit;
diff --git a/plat/drivers/virtio/virtio_ring.c b/plat/drivers/virtio/virtio_ring.c
new file mode 100644
index 0000000..ba91594
--- /dev/null
+++ b/plat/drivers/virtio/virtio_ring.c
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Authors: Sharan Santhanam <sharan.santhanam@xxxxxxxxx>
+ *
+ * Copyright (c) 2018, NEC Europe Ltd., NEC Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
+ */
+/**
+ * Inspired by FreeBSD.
+ * Commit-id: a89e7a10d501
+ */
+#include <uk/config.h>
+#include <string.h>
+#include <uk/print.h>
+#include <uk/errptr.h>
+#include <cpu.h>
+#include <uk/sglist.h>
+#include <uk/arch/atomic.h>
+#include <uk/plat/io.h>
+#include <virtio/virtio_ring.h>
+#include <virtio/virtqueue.h>
+
+#define VIRTQUEUE_MAX_SIZE  32768
+#define to_virtqueue_vring(vq)                 \
+       __containerof(vq, struct virtqueue_vring, vq)
+
+struct virtqueue_desc_info {
+       void *cookie;
+       __u16 desc_count;
+};
+
+struct virtqueue_vring {
+       struct virtqueue vq;
+       /* Descriptor Ring */
+       struct vring vring;
+       /* Reference to the vring */
+       void   *vring_mem;
+       /* Keep track of available descriptors */
+       __u16 desc_avail;
+       /* Index of the next available slot */
+       __u16 head_free_desc;
+       /* Index of the last used descriptor by the host */
+       __u16 last_used_desc_idx;
+       /* Cookie to identify driver buffer */
+       struct virtqueue_desc_info vq_info[];
+};
+
+/**
+ * Static function Declaration(s).
+ */
+static void virtqueue_vring_init(struct virtqueue_vring *vrq, __u16 nr_desc,
+                                __u16 align);
+
+/**
+ * Driver implementation
+ */
+__u64 virtqueue_feature_negotiate(__u64 feature_set)
+{
+       __u64 feature = (1ULL << VIRTIO_TRANSPORT_F_START) - 1;
+
+       /**
+        * Currently our vring driver does not support any ring feature. We will
+        * add support for transport features in the future.
+        */
+       feature &= feature_set;
+       return feature;
+}
+
+__phys_addr virtqueue_physaddr(struct virtqueue *vq)
+{
+       struct virtqueue_vring *vrq = NULL;
+
+       UK_ASSERT(vq);
+
+       vrq = to_virtqueue_vring(vq);
+       return ukplat_virt_to_phys(vrq->vring_mem);
+}
+
+static void virtqueue_vring_init(struct virtqueue_vring *vrq, __u16 nr_desc,
+                                __u16 align)
+{
+       int i = 0;
+
+       vring_init(&vrq->vring, nr_desc, vrq->vring_mem, align);
+
+       vrq->desc_avail = vrq->vring.num;
+       vrq->head_free_desc = 0;
+       vrq->last_used_desc_idx = 0;
+       for (i = 0; i < nr_desc - 1; i++)
+               vrq->vring.desc[i].next = i + 1;
+       /**
+        * When we reach this descriptor we have completely used all the
+        * descriptors in the vring.
+        */
+       vrq->vring.desc[nr_desc - 1].next = VIRTQUEUE_MAX_SIZE;
+}
+
+struct virtqueue *virtqueue_create(__u16 queue_id, __u16 nr_descs, __u16 align,
+                                  virtqueue_callback_t callback,
+                                  virtqueue_notify_host_t notify,
+                                  struct virtio_dev *vdev, struct uk_alloc *a)
+{
+       struct virtqueue_vring *vrq;
+       struct virtqueue *vq;
+       int rc;
+       size_t ring_size = 0;
+
+       UK_ASSERT(a);
+
+       vrq = uk_malloc(a, sizeof(struct virtqueue_vring) +
+                       nr_descs * sizeof(struct virtqueue_desc_info));
+       if (!vrq) {
+               uk_pr_err("Allocation of virtqueue failed\n");
+               rc = -ENOMEM;
+               goto err_exit;
+       }
+       /**
+        * Initialize the pointer before passing it to uk_posix_memalign,
+        * since the allocator does not set it to NULL on every failure
+        * path.
+        */
+       vrq->vring_mem = NULL;
+
+       ring_size = vring_size(nr_descs, align);
+       uk_posix_memalign(a, (void **)&vrq->vring_mem, __PAGE_SIZE, ring_size);
+       if (!vrq->vring_mem) {
+               uk_pr_err("Allocation of vring failed\n");
+               rc = -ENOMEM;
+               goto err_freevq;
+       }
+       memset(vrq->vring_mem, 0, ring_size);
+       virtqueue_vring_init(vrq, nr_descs, align);
+
+       vq = &vrq->vq;
+       vq->queue_id = queue_id;
+       vq->vdev = vdev;
+       vq->vq_callback = callback;
+       vq->vq_notify_host = notify;
+       return vq;
+
+err_freevq:
+       uk_free(a, vrq);
+err_exit:
+       return ERR2PTR(rc);
+}
+
+void virtqueue_destroy(struct virtqueue *vq, struct uk_alloc *a)
+{
+       struct virtqueue_vring *vrq;
+
+       UK_ASSERT(vq);
+
+       vrq = to_virtqueue_vring(vq);
+
+       /* Free the ring */
+       uk_free(a, vrq->vring_mem);
+
+       /* Free the virtqueue metadata */
+       uk_free(a, vrq);
+}
+
+int virtqueue_is_full(struct virtqueue *vq)
+{
+       struct virtqueue_vring *vrq;
+
+       UK_ASSERT(vq);
+
+       vrq = to_virtqueue_vring(vq);
+       return (vrq->desc_avail == 0);
+}
diff --git a/plat/kvm/Config.uk b/plat/kvm/Config.uk
index afcedc0..13e5575 100644
--- a/plat/kvm/Config.uk
+++ b/plat/kvm/Config.uk
@@ -57,6 +57,7 @@ config VIRTIO_BUS
       depends on (ARCH_X86_64)
       depends on LIBUKBUS
       select LIBUKALLOC
+      select LIBUKSGLIST
       help
                Virtio bus driver for probing and operating virtio device and
                transport layer.
diff --git a/plat/kvm/Makefile.uk b/plat/kvm/Makefile.uk
index 986bf4a..ccc8319 100644
--- a/plat/kvm/Makefile.uk
+++ b/plat/kvm/Makefile.uk
@@ -92,5 +92,7 @@ LIBKVMVIRTIO_ASINCLUDES-y   += -I$(UK_PLAT_DRIVERS_BASE)/include
 LIBKVMVIRTIO_CINCLUDES-y    += -I$(UK_PLAT_DRIVERS_BASE)/include
 LIBKVMVIRTIO_SRCS-$(CONFIG_VIRTIO_BUS) +=\
                                                        $(UK_PLAT_DRIVERS_BASE)/virtio/virtio_bus.c
+LIBKVMVIRTIO_SRCS-$(CONFIG_VIRTIO_BUS) +=\
+                                                       $(UK_PLAT_DRIVERS_BASE)/virtio/virtio_ring.c
 LIBKVMVIRTIO_SRCS-$(CONFIG_VIRTIO_PCI) +=\
                                                        $(UK_PLAT_DRIVERS_BASE)/virtio/virtio_pci.c
-- 
2.7.4

