
[Minios-devel] [UNIKRAFT PATCH 09/12] plat/drivers: Request interface for virtio block



This patch introduces the request interface.
Handling a request takes the following steps:
    -> find a free slot in the queue
    -> set up the ring request
    -> notify the backend
Supported operations are read and write; the framing of a request in
the descriptor ring is sketched below.
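
Each request occupies one device-readable header segment (struct
virtio_blk_hdr), one or more data segments of at most
`max_size_segment` bytes each, and one device-writable status byte.
A minimal sketch of the resulting segment count; `vb_total_segs` is
an illustrative helper, not part of the patch:

#include <stddef.h>
#include <stdint.h>

/* One header segment, ceil(data_size / seg_max) data segments and
 * one status-byte segment per request.
 */
static inline uint16_t vb_total_segs(size_t data_size, size_t seg_max)
{
	size_t data_segs = (data_size + seg_max - 1) / seg_max;

	return (uint16_t)(1 /* header */ + data_segs + 1 /* status */);
}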

Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
---
 plat/drivers/include/virtio/virtio_blk.h |   7 +
 plat/drivers/virtio/virtio_blk.c         | 183 +++++++++++++++++++++++++++++++
 2 files changed, 190 insertions(+)
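
A note for reviewers (this sits below the "---" marker, so it will not
become part of the commit message): the read/write segment split in
virtio_blkdev_request_write() follows the virtio convention that the
device reads the header and any write payload, and writes the read
payload plus the final status byte. A sketch of the derivation, using
a hypothetical helper:

#include <stdint.h>

/* Given nseg total segments in the sg list, derive how many segments
 * the device may read and how many it may write.
 */
static void vb_split_segs(uint16_t nseg, int is_write,
			  uint16_t *read_segs, uint16_t *write_segs)
{
	if (is_write) {
		*read_segs  = nseg - 1;	/* header + data */
		*write_segs = 1;	/* status byte */
	} else {
		*read_segs  = 1;	/* header only */
		*write_segs = nseg - 1;	/* data + status */
	}
}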

diff --git a/plat/drivers/include/virtio/virtio_blk.h 
b/plat/drivers/include/virtio/virtio_blk.h
index c88dc45c..fde1953c 100644
--- a/plat/drivers/include/virtio/virtio_blk.h
+++ b/plat/drivers/include/virtio/virtio_blk.h
@@ -49,5 +49,12 @@ struct virtio_blk_config {
        __u16 num_queues;
 };
 
+/* Header comes first in the scatter-gather list */
+struct virtio_blk_hdr {
+       /* Operation type */
+       uint32_t type;
+       /* Start sector */
+       uint64_t sector;
+};
 
 #endif /* __PLAT_DRV_VIRTIO_BLK_H */
diff --git a/plat/drivers/virtio/virtio_blk.c b/plat/drivers/virtio/virtio_blk.c
index e18ef805..bc845d97 100644
--- a/plat/drivers/virtio/virtio_blk.c
+++ b/plat/drivers/virtio/virtio_blk.c
@@ -100,6 +100,188 @@ struct uk_blkdev_queue {
        struct uk_sglist_seg *sgsegs;
 };
 
+static int virtio_blkdev_request_set_sglist(struct uk_blkdev_queue *queue,
+               struct uk_blkdev_request *req,
+               __sector sector_size)
+{
+       struct virtio_blk_device *vbdev;
+       size_t data_size = 0;
+       size_t segment_size;
+       size_t segment_max_size;
+       size_t idx;
+       uintptr_t start_data;
+       int rc = 0;
+
+       UK_ASSERT(queue);
+       UK_ASSERT(req);
+
+       vbdev = queue->vbd;
+       start_data = (uintptr_t)req->aio_buf;
+       data_size = req->nb_sectors * sector_size;
+       segment_max_size = vbdev->max_size_segment;
+
+       /* Prepare the sglist */
+       uk_sglist_reset(&queue->sg);
+       rc = uk_sglist_append(&queue->sg, req, sizeof(struct virtio_blk_hdr));
+       if (unlikely(rc != 0)) {
+               uk_pr_err("Failed to append to sg list %d\n", rc);
+               goto out;
+       }
+
+       /* Append the data to the sglist in chunks of at most
+        * `segment_max_size` bytes (read / write operations only).
+        */
+       for (idx = 0; idx < data_size; idx += segment_max_size) {
+               segment_size = data_size - idx;
+               segment_size = (segment_size > segment_max_size) ?
+                               segment_max_size : segment_size;
+               rc = uk_sglist_append(&queue->sg,
+                               (void *)(start_data + idx),
+                               segment_size);
+               if (unlikely(rc != 0)) {
+                       uk_pr_err("Failed to append to sg list %d\n",
+                                       rc);
+                       goto out;
+               }
+       }
+
+       rc = uk_sglist_append(&queue->sg, &req->result_status,
+                       sizeof(req->result_status));
+       if (unlikely(rc != 0)) {
+               uk_pr_err("Failed to append to sg list %d\n", rc);
+               goto out;
+       }
+
+out:
+       return rc;
+}
+
+static int virtio_blkdev_request_write(struct uk_blkdev_queue *queue,
+               struct uk_blkdev_request *req, __u16 *read_segs,
+               __u16 *write_segs)
+{
+       struct virtio_blk_device *vbdev;
+       struct uk_blkdev_cap *cap;
+       int rc = 0;
+
+       UK_ASSERT(queue);
+       UK_ASSERT(req);
+
+       vbdev = queue->vbd;
+       cap = &vbdev->blkdev.capabilities;
+       if (req->operation == UK_BLKDEV_WRITE &&
+                       cap->mode == O_RDONLY)
+               return -EPERM;
+
+       if (req->aio_buf == NULL)
+               return -EINVAL;
+
+       if (req->nb_sectors == 0)
+               return -EINVAL;
+
+       if (req->start_sector + req->nb_sectors > cap->sectors)
+               return -EINVAL;
+
+       if (req->nb_sectors > cap->max_sectors_per_req)
+               return -EINVAL;
+
+       rc = virtio_blkdev_request_set_sglist(queue, req, cap->ssize);
+       if (rc) {
+               uk_pr_err("Failed to set sglist %d\n", rc);
+               goto out;
+       }
+
+       if (req->operation == UK_BLKDEV_WRITE) {
+               *read_segs = queue->sg.sg_nseg - 1;
+               *write_segs = 1;
+       } else if (req->operation == UK_BLKDEV_READ) {
+               *read_segs = 1;
+               *write_segs = queue->sg.sg_nseg - 1;
+       }
+
+out:
+       return rc;
+}
+
+static int virtio_blkdev_queue_enqueue(struct uk_blkdev_queue *queue,
+               struct uk_blkdev_request *req)
+{
+       __u16 write_segs;
+       __u16 read_segs;
+       int rc = 0;
+
+       UK_ASSERT(queue);
+       UK_ASSERT(req);
+
+       if (virtqueue_is_full(queue->vq)) {
+               uk_pr_debug("The virtqueue is full\n");
+               return -ENOSPC;
+       }
+
+       if (req->operation == UK_BLKDEV_WRITE ||
+                       req->operation == UK_BLKDEV_READ)
+               rc = virtio_blkdev_request_write(queue, req, &read_segs,
+                               &write_segs);
+       else
+               return -EINVAL;
+
+       if (rc)
+               goto out;
+
+       rc = virtqueue_buffer_enqueue(queue->vq, req, &queue->sg,
+                                     read_segs, write_segs);
+
+out:
+       return rc;
+}
+
+static int virtio_blkdev_submit_request(struct uk_blkdev *dev,
+               uint16_t queue_id,
+               struct uk_blkdev_request *req)
+{
+       struct virtio_blk_device *vbdev;
+       struct uk_blkdev_queue *queue;
+       int rc = 0;
+       int status = 0x0;
+
+       UK_ASSERT(req);
+       UK_ASSERT(dev);
+
+       vbdev = to_virtioblkdev(dev);
+       if (unlikely(queue_id >= vbdev->nb_queues)) {
+               uk_pr_err("Invalid queue_id %"__PRIu16"\n", queue_id);
+               return -EINVAL;
+       }
+
+       queue = &vbdev->qs[queue_id];
+       rc = virtio_blkdev_queue_enqueue(queue, req);
+       if (likely(rc >= 0)) {
+               uk_pr_debug("Success and more descriptors available\n");
+               status |= UK_BLKDEV_STATUS_SUCCESS;
+               /**
+                * Notify the host about the new buffer.
+                */
+               virtqueue_host_notify(queue->vq);
+               /**
+                * If there is still room in the ring, also report
+                * UK_BLKDEV_STATUS_MORE.
+                */
+               status |= likely(rc > 0) ? UK_BLKDEV_STATUS_MORE : 0x0;
+       } else if (rc == -ENOSPC) {
+               uk_pr_debug("No more descriptors available\n");
+               goto err;
+       } else {
+               uk_pr_err("Failed to enqueue descriptors into the ring: %d\n",
+                         rc);
+               goto err;
+       }
+
+       return status;
+
+err:
+       return rc;
+}
+
 
 static int virtio_blkdev_recv_done(struct virtqueue *vq, void *priv)
 {
@@ -588,6 +770,7 @@ static int virtio_blk_add_dev(struct virtio_dev *vdev)
                return -ENOMEM;
 
        vbdev->vdev = vdev;
+       vbdev->blkdev.submit_one = virtio_blkdev_submit_request;
        vbdev->blkdev.dev_ops = &virtio_blkdev_ops;
 
        rc = uk_blkdev_drv_register(&vbdev->blkdev, a, drv_name);
-- 
2.11.0
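
For completeness, a caller-side sketch of how a request reaches
virtio_blkdev_submit_request() through the submit_one hook registered
above. uk_blkdev_queue_submit_one() and the request-field names follow
the uk_blkdev API introduced earlier in this series; treat the exact
spellings here as assumptions, and `bdev` / `buf` as a configured
device and a sector-aligned buffer:

/* Hypothetical usage sketch, not part of the patch. */
struct uk_blkdev_request req = {
	.operation    = UK_BLKDEV_WRITE,
	.start_sector = 0,
	.nb_sectors   = 8,
	.aio_buf      = buf,
};
int status = uk_blkdev_queue_submit_one(bdev, 0 /* queue_id */, &req);

if (status < 0)
	uk_pr_err("Failed to submit request: %d\n", status);
else if (!(status & UK_BLKDEV_STATUS_MORE))
	/* The ring is full: wait for completions before submitting
	 * more requests.
	 */
	uk_pr_debug("No more descriptors available\n");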

