|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Minios-devel] [UNIKRAFT PATCH 15/17] plat/xen/drivers/blk: Handle responses from backend
This patch introduces the function responsible for processing responses
from the queue.
Responses are popped from the queue until there is nothing left to process.
Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
---
plat/xen/drivers/blk/blkfront.c | 133 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 133 insertions(+)
diff --git a/plat/xen/drivers/blk/blkfront.c b/plat/xen/drivers/blk/blkfront.c
index 32688f09..23ec4ad0 100644
--- a/plat/xen/drivers/blk/blkfront.c
+++ b/plat/xen/drivers/blk/blkfront.c
@@ -284,6 +284,138 @@ static int blkfront_xen_ring_intr_enable(struct
uk_blkdev_queue *queue)
return (more > 0);
}
+/*
+ * Log the completion status of a finished data request.
+ * 'operation' must be a string literal ("read" / "write") because it is
+ * spliced into the format string by literal concatenation; 'req' and
+ * 'status' are parenthesized on expansion for macro hygiene.
+ * NOTE(review): start_sector is printed with %lu — confirm the sector
+ * field is unsigned long on all supported targets (use a PRIu64-style
+ * specifier if it is a fixed 64-bit type).
+ */
+#define CHECK_STATUS(req, status, operation) \
+	do { \
+		if ((status) != BLKIF_RSP_OKAY) \
+			uk_pr_err("Failed to "operation" %lu sector: %d\n", \
+					(req)->start_sector, \
+					(status)); \
+		else \
+			uk_pr_debug("Succeed to "operation " %lu sector: %d\n",\
+					(req)->start_sector, \
+					(status)); \
+	} while (0)
+
+/*
+ * Pop at most one completed response from the queue's shared ring.
+ *
+ * On return, *req points to the completed uk_blkdev_request, or NULL when
+ * the ring had nothing new to consume.  The driver-private
+ * blkfront_request wrapper is freed here; the uk_blkdev_request itself
+ * remains owned by the original submitter.
+ *
+ * @param queue
+ *	Queue whose ring is consumed (must be non-NULL).
+ * @param req
+ *	Output: completed request, or NULL if the ring was empty.
+ * @return
+ *	0 — rc is currently never set to a nonzero error in this function.
+ */
+static int blkfront_queue_dequeue(struct uk_blkdev_queue *queue,
+		struct uk_blkdev_request **req)
+{
+	RING_IDX prod, cons;
+	struct blkif_response *rsp;
+	struct uk_blkdev_request *req_from_q = NULL;
+	struct blkfront_request *blkfront_req;
+	struct blkif_front_ring *ring;
+	uint8_t status;
+	int rc = 0;
+
+	UK_ASSERT(queue);
+	UK_ASSERT(req);
+
+	ring = &queue->ring;
+	prod = ring->sring->rsp_prod;
+	rmb(); /* Ensure we see queued responses up to 'rp'. */
+	cons = ring->rsp_cons;
+
+	/* No new descriptor since last dequeue operation */
+	if (cons == prod)
+		goto out;
+
+	rsp = RING_GET_RESPONSE(ring, cons);
+	/* The response id presumably carries the blkfront_request pointer
+	 * stored at submit time — confirm against the submit path.
+	 */
+	blkfront_req = (struct blkfront_request *) rsp->id;
+	UK_ASSERT(blkfront_req);
+	req_from_q = blkfront_req->req;
+	UK_ASSERT(req_from_q);
+	status = rsp->status;
+	/* Only log here; the raw status is propagated to the caller via
+	 * result_status below regardless of the operation type.
+	 */
+	switch (rsp->operation) {
+	case BLKIF_OP_READ:
+		CHECK_STATUS(req_from_q, status, "read");
+		break;
+	case BLKIF_OP_WRITE:
+		CHECK_STATUS(req_from_q, status, "write");
+		break;
+	case BLKIF_OP_WRITE_BARRIER:
+		if (status != BLKIF_RSP_OKAY)
+			uk_pr_err("Write barrier error %d\n", status);
+		break;
+	case BLKIF_OP_FLUSH_DISKCACHE:
+		if (status != BLKIF_RSP_OKAY)
+			uk_pr_err("Flush_diskcache error %d\n", status);
+		break;
+	default:
+		uk_pr_err("Unrecognized block operation %d (rsp %d)\n",
+				rsp->operation, status);
+		break;
+	}
+
+	req_from_q->result_status = status;
+	/* The wrapper was only needed to find the request again; free it
+	 * before advancing the consumer index.
+	 */
+	uk_free(drv_allocator, blkfront_req);
+	ring->rsp_cons++;
+
+out:
+	*req = req_from_q;
+	return rc;
+}
+
+/*
+ * Drain all completed responses for one queue and run user callbacks.
+ *
+ * Wired up as the uk_blkdev finish_reqs operation.  Dequeues responses
+ * until the ring is empty, releasing each request's refcount and invoking
+ * its completion callback (if any).  Before returning it either re-enables
+ * the ring interrupt (when the user had enabled it) or does a final
+ * producer check; in both cases a response that raced in during the drain
+ * sends us back to 'moretodo'.
+ *
+ * @param blkdev
+ *	The block device (must be non-NULL).
+ * @param queue_id
+ *	Index of the queue to drain.
+ * @return
+ *	0 on success, -EINVAL for a bad queue_id, or a negative value
+ *	propagated from a failing user callback.
+ */
+static int blkfront_complete_reqs(struct uk_blkdev *blkdev, uint16_t queue_id)
+{
+	struct blkfront_dev *dev;
+	struct uk_blkdev_queue *queue;
+	struct uk_blkdev_request *req;
+	int rc;
+	int more;
+
+	UK_ASSERT(blkdev);
+
+	dev = to_blkfront(blkdev);
+
+	if (queue_id >= dev->nb_queues) {
+		uk_pr_err("Invalid queue identifier: %"__PRIu16"\n", queue_id);
+		return -EINVAL;
+	}
+
+	queue = &dev->queues[queue_id];
+
+	/* Queue interrupts have to be off when calling receive */
+	UK_ASSERT(!(queue->intr_enabled & BLKFRONT_INTR_EN));
+moretodo:
+	for (;;) {
+		rc = blkfront_queue_dequeue(queue, &req);
+		if (rc < 0) {
+			uk_pr_err("Failed to dequeue the request: %d\n", rc);
+			goto err_exit;
+		}
+
+		/* NULL means the ring is (momentarily) empty */
+		if (!req)
+			break;
+
+		uk_refcount_release(&req->state);
+		if (req->cb) {
+			/* NOTE(review): a callback failure aborts the drain
+			 * with the ring possibly still holding responses;
+			 * they will be picked up on the next invocation.
+			 */
+			rc = req->cb(req, req->cookie_callback);
+			if (rc) {
+				uk_pr_err("Callback failed: %d\n", rc);
+				goto err_exit;
+			}
+		}
+	}
+
+	/* Enable interrupt only when user had previously enabled it */
+	if (queue->intr_enabled & BLKFRONT_INTR_USR_EN_MASK) {
+		/* Need to enable the interrupt on the last packet */
+		rc = blkfront_xen_ring_intr_enable(queue);
+		if (rc == 1)
+			goto moretodo;
+	} else {
+		RING_FINAL_CHECK_FOR_RESPONSES(&queue->ring, more);
+		if (more)
+			goto moretodo;
+	}
+
+	return 0;
+
+err_exit:
+	return rc;
+
+}
+
static int blkfront_ring_init(struct uk_blkdev_queue *queue)
{
struct blkif_sring *sring = NULL;
@@ -601,6 +733,7 @@ static int blkfront_add_dev(struct xenbus_device *dev)
d->xendev = dev;
d->blkdev.submit_one = blkfront_submit_request;
+ d->blkdev.finish_reqs = blkfront_complete_reqs;
d->blkdev.dev_ops = &blkfront_ops;
/* Xenbus initialization */
--
2.11.0
_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |