
[Minios-devel] [UNIKRAFT PATCH v2 14/16] plat/xen/drivers/blk: Handle responses from backend



This patch introduces the function responsible for processing responses
from the queue.

Responses are popped from the queue until there is nothing left to process.
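
The dequeue path follows the usual shared-ring consumer pattern: read the
producer index published by the backend, issue a read barrier, then consume
entries starting at the local consumer index until the two indices meet.
As a rough, self-contained sketch of that pattern in plain C (the demo_*
names and the fixed-size ring are made up for illustration; the driver
itself uses the Xen ring macros such as RING_GET_RESPONSE and
RING_FINAL_CHECK_FOR_RESPONSES):

#include <stdint.h>

#define DEMO_RING_SIZE 32 /* illustrative power-of-two ring size */

struct demo_response {
        uint64_t id;     /* echoes the id set by the frontend at submission */
        uint8_t  status; /* 0 == OK, anything else is an error */
};

struct demo_ring {
        volatile uint32_t rsp_prod; /* written by the backend */
        uint32_t rsp_cons;          /* private to the frontend */
        struct demo_response rsp[DEMO_RING_SIZE];
};

/*
 * Pop one response. Returns 1 if a response was consumed, 0 if the ring
 * is empty; mirrors the "cons == prod -> no request" check in
 * blkfront_queue_dequeue() below.
 */
static int demo_ring_dequeue(struct demo_ring *r, struct demo_response *out)
{
        uint32_t prod = r->rsp_prod;

        /* Read ring entries only after the producer index has been read */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);

        if (r->rsp_cons == prod)
                return 0; /* nothing new since the last dequeue */

        *out = r->rsp[r->rsp_cons % DEMO_RING_SIZE];
        r->rsp_cons++;
        return 1;
}

After draining the ring, the driver re-enables the event-channel interrupt
(or runs RING_FINAL_CHECK_FOR_RESPONSES when the user has not asked for
interrupts) and jumps back to the dequeue loop if new responses arrived in
the meantime, so no completion is lost.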

Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
---
 plat/xen/drivers/blk/blkfront.c | 119 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 119 insertions(+)

diff --git a/plat/xen/drivers/blk/blkfront.c b/plat/xen/drivers/blk/blkfront.c
index 4ec0c8fe..ff4ad052 100644
--- a/plat/xen/drivers/blk/blkfront.c
+++ b/plat/xen/drivers/blk/blkfront.c
@@ -275,6 +275,124 @@ static int blkfront_xen_ring_intr_enable(struct uk_blkdev_queue *queue)
        return (more > 0);
 }
 
+#define CHECK_STATUS(req, status, operation) \
+       do { \
+               if (status != BLKIF_RSP_OKAY) \
+                       uk_pr_err("Failed to "operation" %lu sector: %d\n", \
+                               req->start_sector,      \
+                               status);        \
+               else    \
+                       uk_pr_debug("Succeeded to "operation" %lu sector: %d\n",\
+                               req->start_sector, \
+                               status); \
+       } while (0)
+
+static int blkfront_queue_dequeue(struct uk_blkdev_queue *queue,
+               struct uk_blkreq **req)
+{
+       RING_IDX prod, cons;
+       struct blkif_response *rsp;
+       struct uk_blkreq *req_from_q = NULL;
+       struct blkfront_request *blkfront_req;
+       struct blkif_front_ring *ring;
+       uint8_t status;
+       int rc = 0;
+
+       UK_ASSERT(queue);
+       UK_ASSERT(req);
+
+       ring = &queue->ring;
+       prod = ring->sring->rsp_prod;
+       rmb(); /* Ensure we see queued responses up to 'prod'. */
+       cons = ring->rsp_cons;
+
+       /* No new descriptor since last dequeue operation */
+       if (cons == prod)
+               goto out;
+
+       rsp = RING_GET_RESPONSE(ring, cons);
+       blkfront_req = (struct blkfront_request *) rsp->id;
+       UK_ASSERT(blkfront_req);
+       req_from_q = blkfront_req->req;
+       UK_ASSERT(req_from_q);
+       status = rsp->status;
+       switch (rsp->operation) {
+       case BLKIF_OP_READ:
+               CHECK_STATUS(req_from_q, status, "read");
+               break;
+       case BLKIF_OP_WRITE:
+               CHECK_STATUS(req_from_q, status, "write");
+               break;
+       case BLKIF_OP_WRITE_BARRIER:
+               if (status != BLKIF_RSP_OKAY)
+                       uk_pr_err("Write barrier error %d\n", status);
+               break;
+       case BLKIF_OP_FLUSH_DISKCACHE:
+               if (status != BLKIF_RSP_OKAY)
+                       uk_pr_err("Flush_diskcache error %d\n", status);
+               break;
+       default:
+               uk_pr_err("Unrecognized block operation %d (rsp %d)\n",
+                               rsp->operation, status);
+               break;
+       }
+
+       req_from_q->result = -status;
+       uk_free(drv_allocator, blkfront_req);
+       ring->rsp_cons++;
+
+out:
+       *req = req_from_q;
+       return rc;
+}
+
+static int blkfront_complete_reqs(struct uk_blkdev *blkdev,
+               struct uk_blkdev_queue *queue)
+{
+       struct uk_blkreq *req;
+       int rc;
+       int more;
+
+       UK_ASSERT(blkdev);
+       UK_ASSERT(queue);
+
+       /* Queue interrupts have to be off while responses are dequeued */
+       UK_ASSERT(!(queue->intr_enabled & BLKFRONT_INTR_EN));
+moretodo:
+       for (;;) {
+               rc = blkfront_queue_dequeue(queue, &req);
+               if (rc < 0) {
+                       uk_pr_err("Failed to dequeue the request: %d\n", rc);
+                       goto err_exit;
+               }
+
+               if (!req)
+                       break;
+
+               uk_refcount_release(&req->state);
+               if (req->cb)
+                       req->cb(req, req->cb_cookie);
+       }
+
+       /* Enable interrupts only if the user had previously enabled them */
+       if (queue->intr_enabled & BLKFRONT_INTR_USR_EN_MASK) {
+               /* Need to enable the interrupt on the last packet */
+               rc = blkfront_xen_ring_intr_enable(queue);
+               if (rc == 1)
+                       goto moretodo;
+       } else {
+               RING_FINAL_CHECK_FOR_RESPONSES(&queue->ring, more);
+               if (more)
+                       goto moretodo;
+       }
+
+       return 0;
+
+err_exit:
+       return rc;
+
+}
+
 static int blkfront_ring_init(struct uk_blkdev_queue *queue)
 {
        struct blkif_sring *sring = NULL;
@@ -564,6 +682,7 @@ static int blkfront_add_dev(struct xenbus_device *dev)
 
        d->xendev = dev;
        d->blkdev.submit_one = blkfront_submit_request;
+       d->blkdev.finish_reqs = blkfront_complete_reqs;
        d->blkdev.dev_ops = &blkfront_ops;
 
        /* Xenbus initialization */
-- 
2.11.0

