
Re: [UNIKRAFT PATCH v2 4/7] lib/ukblkdev: blkreq.h: Use UK_BLKREQ_ prefix for states and ops



Hi Simon,

Looks good to me.

Reviewed-by: Laurentiu Barbulescu <lrbarbulescu@xxxxxxxxx>
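
As a side note for anyone carrying out-of-tree drivers or applications:
the change is a purely mechanical rename. A quick sketch of what a call
site looks like after this patch (the uk_blkdev_sync_io() signature is
taken from the hunk below; the device, queue id, sector values, and
buffer are placeholders, and the synchronous API is only available with
CONFIG_LIBUKBLKDEV_SYNC_IO_BLOCKED_WAITING enabled):

  #include <uk/blkdev.h>

  /* Sketch only: placeholder names, no error handling. */
  static int read_one_sector(struct uk_blkdev *blkdev, void *buf)
  {
          /* Spelled UK_BLKDEV_READ before this patch,
           * UK_BLKREQ_READ afterwards. */
          return uk_blkdev_sync_io(blkdev, 0 /* queue_id */,
                                   UK_BLKREQ_READ,
                                   0 /* start sector */,
                                   1 /* nb_sectors */, buf);
  }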

On Tue, Dec 22, 2020 at 5:38 PM Simon Kuenzer <simon.kuenzer@xxxxxxxxx> wrote:
Instead of using the `UK_BLKDEV_REQ_` prefix for `struct uk_blkreq`
states (finished, unfinished) and the `UK_BLKDEV_` prefix for operation
requests (read, write, flush), this commit simplifies the API by using
the single `UK_BLKREQ_` prefix for both.

Signed-off-by: Simon Kuenzer <simon.kuenzer@xxxxxxxxx>
---
 lib/ukblkdev/include/uk/blkdev.h        | 10 +++++-----
 lib/ukblkdev/include/uk/blkdev_driver.h |  2 +-
 lib/ukblkdev/include/uk/blkreq.h        | 14 +++++++-------
 plat/drivers/virtio/virtio_blk.c        | 12 ++++++------
 plat/xen/drivers/blk/blkfront.c         | 10 +++++-----
 5 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/lib/ukblkdev/include/uk/blkdev.h b/lib/ukblkdev/include/uk/blkdev.h
index 7aa099092..abc228d9c 100644
--- a/lib/ukblkdev/include/uk/blkdev.h
+++ b/lib/ukblkdev/include/uk/blkdev.h
@@ -459,18 +459,18 @@ int uk_blkdev_sync_io(struct uk_blkdev *dev,
                sector,         \
                nb_sectors,     \
                buf)            \
-       uk_blkdev_sync_io(blkdev, queue_id, UK_BLKDEV_WRITE, sector, \
-                       nb_sectors, buf) \
+       uk_blkdev_sync_io(blkdev, queue_id, UK_BLKREQ_WRITE, sector, \
+                         nb_sectors, buf)                           \

 #define uk_blkdev_sync_read(blkdev,\
                queue_id,       \
                sector,         \
                nb_sectors,     \
                buf)            \
-       uk_blkdev_sync_io(blkdev, queue_id, UK_BLKDEV_READ, sector, \
-                       nb_sectors, buf) \
+       uk_blkdev_sync_io(blkdev, queue_id, UK_BLKREQ_READ, sector, \
+                         nb_sectors, buf)                          \

-#endif
+#endif /* CONFIG_LIBUKBLKDEV_SYNC_IO_BLOCKED_WAITING */

 /**
  * Stop a Unikraft block device, and set its state to UK_BLKDEV_CONFIGURED
diff --git a/lib/ukblkdev/include/uk/blkdev_driver.h b/lib/ukblkdev/include/uk/blkdev_driver.h
index be8070691..cb1b3ba39 100644
--- a/lib/ukblkdev/include/uk/blkdev_driver.h
+++ b/lib/ukblkdev/include/uk/blkdev_driver.h
@@ -104,7 +104,7 @@ static inline void uk_blkdev_drv_queue_event(struct uk_blkdev *dev,
  *     uk_blkreq structure
  */
 #define uk_blkreq_finished(req) \
-       (ukarch_store_n(&(req)->state.counter, UK_BLKDEV_REQ_FINISHED))
+       (ukarch_store_n(&(req)->state.counter, UK_BLKREQ_FINISHED))

 /**
  * Frees the data allocated for the Unikraft Block Device.
diff --git a/lib/ukblkdev/include/uk/blkreq.h b/lib/ukblkdev/include/uk/blkreq.h
index 0bf4ae3ff..ae1a31ed5 100644
--- a/lib/ukblkdev/include/uk/blkreq.h
+++ b/lib/ukblkdev/include/uk/blkreq.h
@@ -58,8 +58,8 @@ struct uk_blkreq;
  *     Operation status
  */
 enum uk_blkreq_state {
-       UK_BLKDEV_REQ_FINISHED = 0,
-       UK_BLKDEV_REQ_UNFINISHED
+       UK_BLKREQ_FINISHED = 0,
+       UK_BLKREQ_UNFINISHED
 };

 /**
@@ -67,11 +67,11 @@ enum uk_blkreq_state {
  */
 enum uk_blkreq_op {
        /* Read operation */
-       UK_BLKDEV_READ = 0,
+       UK_BLKREQ_READ = 0,
        /* Write operation */
-       UK_BLKDEV_WRITE,
+       UK_BLKREQ_WRITE,
        /* Flush the volatile write cache */
-       UK_BLKDEV_FFLUSH = 4
+       UK_BLKREQ_FFLUSH = 4
 };

 /**
@@ -135,7 +135,7 @@ static inline void uk_blkreq_init(struct uk_blkreq *req,
        req->start_sector = start;
        req->nb_sectors = nb_sectors;
        req->aio_buf = aio_buf;
-       ukarch_store_n(&req->state.counter, UK_BLKDEV_REQ_UNFINISHED);
+       ukarch_store_n(&req->state.counter, UK_BLKREQ_UNFINISHED);
        req->cb = cb;
        req->cb_cookie = cb_cookie;
 }
@@ -147,7 +147,7 @@ static inline void uk_blkreq_init(struct uk_blkreq *req,
  *     uk_blkreq structure
  **/
 #define uk_blkreq_is_done(req) \
-               (ukarch_load_n(&(req)->state.counter) == UK_BLKDEV_REQ_FINISHED)
+               (ukarch_load_n(&(req)->state.counter) == UK_BLKREQ_FINISHED)

 #ifdef __cplusplus
 }
diff --git a/plat/drivers/virtio/virtio_blk.c b/plat/drivers/virtio/virtio_blk.c
index ff33cc119..262355b49 100644
--- a/plat/drivers/virtio/virtio_blk.c
+++ b/plat/drivers/virtio/virtio_blk.c
@@ -186,7 +186,7 @@ static int virtio_blkdev_request_write(struct uk_blkdev_queue *queue,
        vbdev = queue->vbd;
        cap = &vbdev->blkdev.capabilities;
        req = virtio_blk_req->req;
-       if (req->operation == UK_BLKDEV_WRITE &&
+       if (req->operation == UK_BLKREQ_WRITE &&
                        cap->mode == O_RDONLY)
                return -EPERM;

@@ -209,11 +209,11 @@ static int virtio_blkdev_request_write(struct uk_blkdev_queue *queue,
                goto out;
        }

-       if (req->operation == UK_BLKDEV_WRITE) {
+       if (req->operation == UK_BLKREQ_WRITE) {
                *read_segs = queue->sg.sg_nseg - 1;
                *write_segs = 1;
                virtio_blk_req->virtio_blk_outhdr.type = VIRTIO_BLK_T_OUT;
-       } else if (req->operation == UK_BLKDEV_READ) {
+       } else if (req->operation == UK_BLKREQ_READ) {
                *read_segs = 1;
                *write_segs = queue->sg.sg_nseg - 1;
                virtio_blk_req->virtio_blk_outhdr.type = VIRTIO_BLK_T_IN;
@@ -278,11 +278,11 @@ static int virtio_blkdev_queue_enqueue(struct uk_blkdev_queue *queue,

        virtio_blk_req->req = req;
        virtio_blk_req->virtio_blk_outhdr.sector = req->start_sector;
-       if (req->operation == UK_BLKDEV_WRITE ||
-                       req->operation == UK_BLKDEV_READ)
+       if (req->operation == UK_BLKREQ_WRITE ||
+                       req->operation == UK_BLKREQ_READ)
                rc = virtio_blkdev_request_write(queue, virtio_blk_req,
                                &read_segs, &write_segs);
-       else if (req->operation == UK_BLKDEV_FFLUSH)
+       else if (req->operation == UK_BLKREQ_FFLUSH)
                rc = virtio_blkdev_request_flush(queue, virtio_blk_req,
                                &read_segs, &write_segs);
        else
diff --git a/plat/xen/drivers/blk/blkfront.c b/plat/xen/drivers/blk/blkfront.c
index 5cd5cf226..60b634665 100644
--- a/plat/xen/drivers/blk/blkfront.c
+++ b/plat/xen/drivers/blk/blkfront.c
@@ -250,7 +250,7 @@ static void blkif_request_init(struct blkif_request *ring_req,
        UK_ASSERT(nb_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST);

        /* Set ring request */
-       ring_req->operation = (req->operation == UK_BLKDEV_WRITE) ?
+       ring_req->operation = (req->operation == UK_BLKREQ_WRITE) ?
                        BLKIF_OP_WRITE : BLKIF_OP_READ;
        ring_req->nr_segments = nb_segments;
        ring_req->sector_number = req->start_sector;
@@ -281,7 +281,7 @@ static int blkfront_request_write(struct blkfront_request *blkfront_req,
        dev = blkfront_req->queue->dev;
        cap = &dev->blkdev.capabilities;
        sector_size = cap->ssize;
-       if (req->operation == UK_BLKDEV_WRITE && cap->mode == O_RDONLY)
+       if (req->operation == UK_BLKREQ_WRITE && cap->mode == O_RDONLY)
                return -EPERM;

        if (req->aio_buf == NULL)
@@ -360,10 +360,10 @@ static int blkfront_queue_enqueue(struct uk_blkdev_queue *queue,
        ring_req->id = (uintptr_t) blkfront_req;
        ring_req->handle = dev->handle;

-       if (req->operation == UK_BLKDEV_READ ||
-                       req->operation == UK_BLKDEV_WRITE)
+       if (req->operation == UK_BLKREQ_READ ||
+                       req->operation == UK_BLKREQ_WRITE)
                rc = blkfront_request_write(blkfront_req, ring_req);
-       else if (req->operation == UK_BLKDEV_FFLUSH)
+       else if (req->operation == UK_BLKREQ_FFLUSH)
                rc =  blkfront_request_flush(blkfront_req, ring_req);
        else
                rc = -EINVAL;
--
2.20.1