
[Minios-devel] [UNIKRAFT PATCH v3 13/14] plat/drivers: Handle responses from backend for virtio block



This patch introduces the function responsible for processing
responses from the backend.
Responses are popped from the queue until there are none left to process.
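
For reference, this completion path is reached through the blkdev
finish_reqs callback registered below. A minimal, illustrative sketch of
how a caller could drive it (helper names such as
uk_blkdev_queue_finish_reqs(), uk_blkreq_is_done() and the exact headers
are assumptions based on the uk_blkdev API from this series, not part of
this patch):

    #include <uk/blkdev.h>
    #include <uk/blkreq.h>

    /* Sketch only: poll a queue until a previously submitted request
     * completes. Interrupts are assumed to be left disabled on the queue,
     * so completion is driven by repeatedly invoking the device's
     * finish_reqs handler (virtio_blkdev_complete_reqs() in this patch).
     */
    static int wait_for_request(struct uk_blkdev *dev, uint16_t queue_id,
                                struct uk_blkreq *req)
    {
            int rc;

            while (!uk_blkreq_is_done(req)) {
                    rc = uk_blkdev_queue_finish_reqs(dev, queue_id);
                    if (rc < 0)
                            return rc;
            }

            /* result is 0 for VIRTIO_BLK_S_OK, negative on error */
            return req->result;
    }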

Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
---
 plat/drivers/include/virtio/virtio_blk.h |  4 ++
 plat/drivers/virtio/virtio_blk.c         | 73 ++++++++++++++++++++++++
 2 files changed, 77 insertions(+)

diff --git a/plat/drivers/include/virtio/virtio_blk.h b/plat/drivers/include/virtio/virtio_blk.h
index 662418c6..d56c1e13 100644
--- a/plat/drivers/include/virtio/virtio_blk.h
+++ b/plat/drivers/include/virtio/virtio_blk.h
@@ -93,4 +93,8 @@ struct virtio_blk_outhdr {
        __virtio_le64 sector;
 };
 
+/* Request status; this is the final byte of the write scatter-gather list. */
+#define VIRTIO_BLK_S_OK                0
+#define VIRTIO_BLK_S_IOERR     1
+
 #endif /* __PLAT_DRV_VIRTIO_BLK_H */
diff --git a/plat/drivers/virtio/virtio_blk.c b/plat/drivers/virtio/virtio_blk.c
index 520cdfbe..85f40ba6 100644
--- a/plat/drivers/virtio/virtio_blk.c
+++ b/plat/drivers/virtio/virtio_blk.c
@@ -338,6 +338,78 @@ err:
        return rc;
 }
 
+static int virtio_blkdev_queue_dequeue(struct uk_blkdev_queue *queue,
+               struct uk_blkreq **req)
+{
+       int ret = 0;
+       __u32 len;
+       struct virtio_blkdev_request *response_req;
+
+       UK_ASSERT(req);
+       *req = NULL;
+
+       ret = virtqueue_buffer_dequeue(queue->vq, (void **) &response_req,
+                       &len);
+       if (ret < 0) {
+               uk_pr_info("No data available in the queue\n");
+               return 0;
+       }
+
+       /* We need at least one byte for the result status */
+       if (unlikely(len < 1)) {
+               uk_pr_err("Received invalid response size: %u\n", len);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       *req = response_req->req;
+       (*req)->result = -response_req->status;
+
+out:
+       uk_free(a, response_req);
+       return ret;
+}
+
+static int virtio_blkdev_complete_reqs(struct uk_blkdev *dev,
+               struct uk_blkdev_queue *queue)
+{
+       struct uk_blkreq *req;
+       int rc = 0;
+
+       UK_ASSERT(dev);
+
+       /* Queue interrupts have to be off when calling receive */
+       UK_ASSERT(!(queue->intr_enabled & VTBLK_INTR_EN));
+
+moretodo:
+       for (;;) {
+               rc = virtio_blkdev_queue_dequeue(queue, &req);
+               if (unlikely(rc < 0)) {
+                       uk_pr_err("Failed to dequeue the request: %d\n", rc);
+                       goto err_exit;
+               }
+
+               if (!req)
+                       break;
+
+               uk_blkreq_finished(req);
+               if (req->cb)
+                       req->cb(req, req->cb_cookie);
+       }
+
+       /* Re-enable the interrupt only if the user previously enabled it */
+       if (queue->intr_enabled & VTBLK_INTR_USR_EN_MASK) {
+               rc = virtqueue_intr_enable(queue->vq);
+               if (rc == 1)
+                       goto moretodo;
+       }
+
+       return 0;
+
+err_exit:
+       return rc;
+}
+
 static int virtio_blkdev_recv_done(struct virtqueue *vq, void *priv)
 {
        struct uk_blkdev_queue *queue = NULL;
@@ -811,6 +883,7 @@ static int virtio_blk_add_dev(struct virtio_dev *vdev)
                return -ENOMEM;
 
        vbdev->vdev = vdev;
+       vbdev->blkdev.finish_reqs = virtio_blkdev_complete_reqs;
        vbdev->blkdev.submit_one = virtio_blkdev_submit_request;
        vbdev->blkdev.dev_ops = &virtio_blkdev_ops;
 
-- 
2.17.1

