|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Minios-devel] [UNIKRAFT PATCH 14/17] plat/xen/drivers/blk: Flush requests
This patch introduces flush requests.
This operation guarantees that all previous write requests
are finished.
Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
---
plat/xen/drivers/blk/blkfront.c | 29 +++++++++++++++++++++++++++--
plat/xen/drivers/blk/blkfront.h | 8 ++++++++
plat/xen/drivers/blk/blkfront_xs.c | 15 +++++++++++++++
3 files changed, 50 insertions(+), 2 deletions(-)
diff --git a/plat/xen/drivers/blk/blkfront.c b/plat/xen/drivers/blk/blkfront.c
index 112463a7..32688f09 100644
--- a/plat/xen/drivers/blk/blkfront.c
+++ b/plat/xen/drivers/blk/blkfront.c
@@ -116,7 +116,7 @@ static void blkfront_ring_wr_init(struct blkif_request
*ring_req,
SECTOR_INDEX_IN_PAGE(end_data - 1, sector_size);
}
-static int blkfront_request_write_wr(struct blkfront_request *blkfront_req,
+static int blkfront_request_write(struct blkfront_request *blkfront_req,
struct blkif_request *ring_req)
{
struct blkfront_dev *dev;
@@ -151,6 +151,29 @@ static int blkfront_request_write_wr(struct
blkfront_request *blkfront_req,
return rc;
}
+static int blkfront_request_flush(struct blkfront_request *blkfront_req,
+ struct blkif_request *ring_req)
+{
+ struct blkfront_dev *dev;
+ struct uk_blkdev_queue *queue;
+
+ UK_ASSERT(ring_req);
+
+ queue = blkfront_req->queue;
+ dev = queue->dev;
+ if (dev->barrier)
+ ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+ else if (dev->flush)
+ ring_req->operation = BLKIF_OP_FLUSH_DISKCACHE;
+ else
+ return -ENOTSUP;
+
+ ring_req->nr_segments = 0;
+ ring_req->sector_number = 0;
+
+ return 0;
+}
+
static int blkfront_queue_enqueue(struct uk_blkdev_queue *queue,
struct uk_blkdev_request *req)
{
@@ -179,7 +202,9 @@ static int blkfront_queue_enqueue(struct uk_blkdev_queue
*queue,
if (req->operation == UK_BLKDEV_READ ||
req->operation == UK_BLKDEV_WRITE)
- rc = blkfront_request_write_wr(blkfront_req, ring_req);
+ rc = blkfront_request_write(blkfront_req, ring_req);
+ else if (req->operation == UK_BLKDEV_FFLUSH)
+ rc = blkfront_request_flush(blkfront_req, ring_req);
else
rc = -EINVAL;
diff --git a/plat/xen/drivers/blk/blkfront.h b/plat/xen/drivers/blk/blkfront.h
index be610858..94072d89 100644
--- a/plat/xen/drivers/blk/blkfront.h
+++ b/plat/xen/drivers/blk/blkfront.h
@@ -90,6 +90,14 @@ struct blkfront_dev {
struct uk_blkdev blkdev;
/* A specific number to the blkfront device. */
blkif_vdev_t handle;
+ /* Value which indicates that the backend can process requests with the
+ * BLKIF_OP_WRITE_BARRIER request opcode.
+ */
+ int barrier;
+ /* Value which indicates that the backend can process requests with the
+ * BLKIF_OP_FLUSH_DISKCACHE request opcode.
+ */
+ int flush;
/* Number of configured queues used for requests */
uint16_t nb_queues;
/* Vector of queues used for communication with backend */
diff --git a/plat/xen/drivers/blk/blkfront_xs.c
b/plat/xen/drivers/blk/blkfront_xs.c
index 014a25d3..1a71a90d 100644
--- a/plat/xen/drivers/blk/blkfront_xs.c
+++ b/plat/xen/drivers/blk/blkfront_xs.c
@@ -190,6 +190,21 @@ static int blkfront_xb_get_capabilities(struct
blkfront_dev *blkdev)
return err;
}
+ err = xs_scanf(XBT_NIL, xendev->otherend, "feature-flush-cache",
+ "%d", &blkdev->flush);
+ if (err < 0) {
+ uk_pr_err("Failed to read feature-flush-cache from xs: %d\n",
+ err);
+ return err;
+ }
+
+ err = xs_scanf(XBT_NIL, xendev->otherend, "feature-barrier",
+ "%d", &blkdev->barrier);
+ if (err < 0) {
+ uk_pr_err("Failed to read feature-barrier from xs: %d\n", err);
+ return err;
+ }
+
mode = xs_read(XBT_NIL, xendev->otherend, "mode");
if (PTRISERR(mode)) {
uk_pr_err("Failed to read mode from xs: %d.\n", err);
--
2.11.0
_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |