[Minios-devel] [UNIKRAFT PATCH 10/12] plat/drivers: Flush requests for virtio block
This patch introduces flush requests. This operation guarantees
that all previous write requests have finished.

Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
---
 plat/drivers/include/virtio/virtio_blk.h |  8 ++++
 plat/drivers/virtio/virtio_blk.c         | 72 +++++++++++++++++++++++++-------
 2 files changed, 66 insertions(+), 14 deletions(-)

diff --git a/plat/drivers/include/virtio/virtio_blk.h b/plat/drivers/include/virtio/virtio_blk.h
index fde1953c..e2d28928 100644
--- a/plat/drivers/include/virtio/virtio_blk.h
+++ b/plat/drivers/include/virtio/virtio_blk.h
@@ -32,6 +32,10 @@
 #define VIRTIO_BLK_F_RO 5
 /* Block size of disk is in blk_size. */
 #define VIRTIO_BLK_F_BLK_SIZE 6
+/* Cache flush command support. */
+#define VIRTIO_BLK_F_FLUSH 9
+/* Device can toggle its cache between writeback and writethrough modes. */
+#define VIRTIO_BLK_F_CONFIG_WCE 11
 /* Device supports multi-queues */
 #define VIRTIO_BLK_F_MQ 12
 
@@ -45,6 +49,10 @@ struct virtio_blk_config {
 	__u32 seg_max;
 	/* Block size */
 	__u32 blk_size;
+	/* It is set if backend has writeback cache,
+	 * otherwise it has writethrough cache.
+	 **/
+	__u8 writeback;
 	/* Number of vqs, only available when VIRTIO_BLK_F_MQ is set */
 	__u16 num_queues;
 };
diff --git a/plat/drivers/virtio/virtio_blk.c b/plat/drivers/virtio/virtio_blk.c
index bc845d97..5b8972cf 100644
--- a/plat/drivers/virtio/virtio_blk.c
+++ b/plat/drivers/virtio/virtio_blk.c
@@ -25,6 +25,7 @@
 #include <uk/print.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <stdbool.h>
 #include <virtio/virtio_bus.h>
 #include <virtio/virtio_ids.h>
 #include <uk/blkdev.h>
@@ -49,11 +50,13 @@
  *	Multi-queue,
  *	Maximum size of a segment for requests,
  *	Maximum number of segments per request,
+ *	Flush
  **/
 #define VIRTIO_BLK_DRV_FEATURES(features) \
 	(VIRTIO_FEATURES_UPDATE(features, VIRTIO_BLK_F_RO | \
 		VIRTIO_BLK_F_BLK_SIZE | VIRTIO_BLK_F_MQ | \
-		VIRTIO_BLK_F_SEG_MAX | VIRTIO_BLK_F_SIZE_MAX))
+		VIRTIO_BLK_F_SEG_MAX | VIRTIO_BLK_F_SIZE_MAX | \
+		VIRTIO_BLK_F_CONFIG_WCE | VIRTIO_BLK_F_FLUSH))
 
 static struct uk_alloc *a;
 static const char *drv_name = DRIVER_NAME;
@@ -77,6 +80,8 @@ struct virtio_blk_device {
 	__u32 max_segments;
 	/* Maximum size of a segment */
 	__u32 max_size_segment;
+	/* If it is set then flush request is permitted */
+	__u8 writeback;
 };
 
 struct uk_blkdev_queue {
@@ -102,7 +107,8 @@ struct uk_blkdev_queue {
 
 static int virtio_blkdev_request_set_sglist(struct uk_blkdev_queue *queue,
 		struct uk_blkdev_request *req,
-		__sector sector_size)
+		__sector sector_size,
+		bool have_data)
 {
 	struct virtio_blk_device *vbdev;
 	size_t data_size = 0;
@@ -131,19 +137,20 @@ static int virtio_blkdev_request_set_sglist(struct uk_blkdev_queue *queue,
 	/* Append to sglist chunks of `segment_max_size` size
 	 * Only for read / write operations
 	 **/
-	for (idx = 0; idx < data_size; idx += segment_max_size) {
-		segment_size = data_size - idx;
-		segment_size = (segment_size > segment_max_size) ?
-				segment_max_size : segment_size;
-		rc = uk_sglist_append(&queue->sg,
-				(void *)(start_data + idx),
-				segment_size);
-		if (unlikely(rc != 0)) {
-			uk_pr_err("Failed to append to sg list %d\n",
-					rc);
-			goto out;
+	if (have_data)
+		for (idx = 0; idx < data_size; idx += segment_max_size) {
+			segment_size = data_size - idx;
+			segment_size = (segment_size > segment_max_size) ?
+					segment_max_size : segment_size;
+			rc = uk_sglist_append(&queue->sg,
+					(void *)(start_data + idx),
+					segment_size);
+			if (unlikely(rc != 0)) {
+				uk_pr_err("Failed to append to sg list %d\n",
+						rc);
+				goto out;
+			}
 		}
-	}
 
 	rc = uk_sglist_append(&queue->sg, &req->result_status,
 			sizeof(req->result_status));
@@ -203,6 +210,38 @@ out:
 	return rc;
 }
 
+static int virtio_blkdev_request_flush(struct uk_blkdev_queue *queue,
+		struct uk_blkdev_request *req, __u16 *read_segs,
+		__u16 *write_segs)
+{
+	struct virtio_blk_device *vbdev;
+	int rc = 0;
+
+	UK_ASSERT(queue);
+	UK_ASSERT(req);
+
+	vbdev = queue->vbd;
+	if (!vbdev->writeback)
+		return -ENOTSUP;
+
+	if (req->start_sector) {
+		uk_pr_warn("Start sector should be 0 at flush request\n");
+		req->start_sector = 0;
+	}
+
+	rc = virtio_blkdev_request_set_sglist(queue, req, 0, false);
+	if (rc) {
+		uk_pr_err("Failed to set sglist %d\n", rc);
+		goto out;
+	}
+
+	*read_segs = 1;
+	*write_segs = 1;
+
+out:
+	return rc;
+}
+
 static int virtio_blkdev_queue_enqueue(struct uk_blkdev_queue *queue,
 		struct uk_blkdev_request *req)
 {
@@ -222,6 +261,9 @@ static int virtio_blkdev_queue_enqueue(struct uk_blkdev_queue *queue,
 			req->operation == UK_BLKDEV_READ)
 		rc = virtio_blkdev_request_write(queue, req, &read_segs,
 				&write_segs);
+	else if (req->operation == UK_BLKDEV_FFLUSH)
+		rc = virtio_blkdev_request_flush(queue, req, &read_segs,
+				&write_segs);
 	else
 		return -EINVAL;
 
@@ -726,6 +768,8 @@ static int virtio_blkdev_feature_negotiate(struct virtio_blk_device *vbdev)
 	vbdev->max_vqueue_pairs = num_queues;
 	vbdev->max_segments = max_segments;
 	vbdev->max_size_segment = max_size_segment;
+	vbdev->writeback = virtio_has_features(host_features,
+			VIRTIO_BLK_F_FLUSH);
 
 	/**
 	 * Mask out features supported by both driver and device.
-- 
2.11.0
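
As background for why virtio_blkdev_request_flush() reserves exactly one
read segment and one write segment: per the VIRTIO block specification, a
flush carries no data buffers, only a device-readable request header and a
device-writable status byte. Below is a minimal sketch of that header,
assuming the request layout from the VIRTIO spec; the struct and function
names here are illustrative, not part of the patch or of Unikraft's API.

#include <stdint.h>
#include <string.h>

/* Request type for a cache flush, from the VIRTIO block spec. */
#define VIRTIO_BLK_T_FLUSH 4

/* Device-readable request header; field names follow the spec,
 * the struct name is illustrative. */
struct virtio_blk_req_hdr {
	uint32_t type;     /* VIRTIO_BLK_T_FLUSH for a flush */
	uint32_t reserved; /* unused for flush, left as 0 */
	uint64_t sector;   /* must be 0 for a flush, hence the
			    * warning in the patch above */
};

/* Prepare the single device-readable segment of a flush request.
 * The device-writable status byte (the second segment, covered by
 * req->result_status in the driver) is filled in by the device on
 * completion. */
static void virtio_blk_fill_flush_hdr(struct virtio_blk_req_hdr *hdr)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->type = VIRTIO_BLK_T_FLUSH;
	/* hdr->sector stays 0: a flush applies to the device's whole
	 * write cache, not to a sector range. */
}

This also motivates the -ENOTSUP path in the patch: when
VIRTIO_BLK_F_FLUSH was not negotiated, the device behaves as a
writethrough cache, so every completed write is already durable and
there is nothing for a flush command to do.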