[Minios-devel] [UNIKRAFT PATCH v2 5/6] lib/ukblkdev: Synchronous requests interface
This patch introduces synchronous request operations. It requires the use of
a semaphore in the implementation.

Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
---
 lib/ukblkdev/Config.uk                |  9 ++++++
 lib/ukblkdev/blkdev.c                 | 58 +++++++++++++++++++++++++++++++++++
 lib/ukblkdev/exportsyms.uk            |  5 +++
 lib/ukblkdev/include/uk/blkdev.h      | 50 ++++++++++++++++++++++++++++++
 lib/ukblkdev/include/uk/blkdev_core.h |  3 +-
 lib/ukblkdev/include/uk/blkreq.h      | 36 ++++++++++++++++++++++
 6 files changed, 160 insertions(+), 1 deletion(-)

diff --git a/lib/ukblkdev/Config.uk b/lib/ukblkdev/Config.uk
index f0c89654..d5c96090 100644
--- a/lib/ukblkdev/Config.uk
+++ b/lib/ukblkdev/Config.uk
@@ -25,4 +25,13 @@ if LIBUKBLKDEV
 	  When this option is enabled a dispatcher thread is allocated
 	  for each configured queue. libuksched is required for this option.
+
+	config LIBUKBLKDEV_SYNC_IO_BLOCKED_WAITING
+		bool "Synchronous I/O API"
+		default n
+		select LIBUKSCHED
+		select LIBUKLOCK
+		select LIBUKLOCK_SEMAPHORE
+		help
+			Use semaphore for waiting after a request I/O is done.
 endif
diff --git a/lib/ukblkdev/blkdev.c b/lib/ukblkdev/blkdev.c
index a92b29f1..21346d50 100644
--- a/lib/ukblkdev/blkdev.c
+++ b/lib/ukblkdev/blkdev.c
@@ -408,3 +408,61 @@ int uk_blkdev_queue_finish_reqs(struct uk_blkdev *dev,
 
 	return dev->finish_reqs(dev, dev->_queue[queue_id]);
 }
+
+#if CONFIG_LIBUKBLKDEV_SYNC_IO_BLOCKED_WAITING
+/**
+ * Used for sending a synchronous request.
+ */
+struct uk_blkdev_sync_io_request {
+	struct uk_blkreq req;	/* Request structure. */
+
+	/* Semaphore used for waiting after the response is done. */
+	struct uk_semaphore s;
+};
+
+static void __sync_io_callback(struct uk_blkreq *req,
+		void *cookie_callback)
+{
+	struct uk_blkdev_sync_io_request *sync_io_req;
+
+	UK_ASSERT(req);
+	UK_ASSERT(cookie_callback);
+
+	sync_io_req = (struct uk_blkdev_sync_io_request *)cookie_callback;
+	uk_semaphore_up(&sync_io_req->s);
+}
+
+int uk_blkdev_sync_io(struct uk_blkdev *dev,
+		uint16_t queue_id,
+		enum uk_blkreq_op operation,
+		__sector start_sector,
+		__sector nb_sectors,
+		void *buf)
+{
+	struct uk_blkreq *req;
+	int rc = 0;
+	struct uk_blkdev_sync_io_request sync_io_req;
+
+	UK_ASSERT(dev != NULL);
+	UK_ASSERT(queue_id < CONFIG_LIBUKBLKDEV_MAXNBQUEUES);
+	UK_ASSERT(dev->_data);
+	UK_ASSERT(dev->submit_one);
+	UK_ASSERT(dev->_data->state == UK_BLKDEV_RUNNING);
+	UK_ASSERT(!PTRISERR(dev->_queue[queue_id]));
+
+	req = &sync_io_req.req;
+	uk_blkreq_init(req, operation, start_sector, nb_sectors,
+			__sync_io_callback, (void *)&sync_io_req, buf);
+	uk_semaphore_init(&sync_io_req.s, 0);
+
+	rc = uk_blkdev_queue_submit_one(dev, queue_id, req);
+	if (unlikely(!uk_blkdev_status_successful(rc))) {
+		uk_pr_err("blkdev%"PRIu16"-q%"PRIu16": Failed to submit I/O req: %d\n",
+				dev->_data->id, queue_id, rc);
+		return rc;
+	}
+
+	uk_semaphore_down(&sync_io_req.s);
+	return req->result;
+}
+#endif
diff --git a/lib/ukblkdev/exportsyms.uk b/lib/ukblkdev/exportsyms.uk
index 33402f78..85135b95 100644
--- a/lib/ukblkdev/exportsyms.uk
+++ b/lib/ukblkdev/exportsyms.uk
@@ -19,3 +19,8 @@ uk_blkdev_sectors
 uk_blkdev_ioalign
 uk_blkdev_queue_submit_one
 uk_blkdev_queue_finish_reqs
+uk_blkreq_init
+uk_blkdev_sync_io
+uk_blkdev_sync_read
+uk_blkdev_sync_write
+uk_blkreq_is_done
diff --git a/lib/ukblkdev/include/uk/blkdev.h b/lib/ukblkdev/include/uk/blkdev.h
index b941911c..ad0f2629 100644
--- a/lib/ukblkdev/include/uk/blkdev.h
+++ b/lib/ukblkdev/include/uk/blkdev.h
@@ -421,6 +421,56 @@ int uk_blkdev_queue_submit_one(struct uk_blkdev *dev, uint16_t queue_id,
  */
 int uk_blkdev_queue_finish_reqs(struct uk_blkdev *dev,
 		uint16_t queue_id);
+#if CONFIG_LIBUKBLKDEV_SYNC_IO_BLOCKED_WAITING
+/**
+ * Make a sync io request on a specific queue.
+ * `uk_blkdev_queue_finish_reqs()` must be called in queue interrupt context
+ * or another thread context in order to avoid blocking of the thread forever.
+ *
+ * @param dev
+ *	The Unikraft Block Device
+ * @param queue_id
+ *	queue_id
+ * @param op
+ *	Type of operation
+ * @param sector
+ *	Start Sector
+ * @param nb_sectors
+ *	Number of sectors
+ * @param buf
+ *	Buffer where data is found
+ * @return
+ *	- 0: Success
+ *	- (<0): on error returned by driver
+ */
+int uk_blkdev_sync_io(struct uk_blkdev *dev,
+		uint16_t queue_id,
+		enum uk_blkreq_op op,
+		__sector sector,
+		__sector nb_sectors,
+		void *buf);
+
+/*
+ * Wrappers for uk_blkdev_sync_io
+ */
+#define uk_blkdev_sync_write(blkdev,\
+		queue_id, \
+		sector, \
+		nb_sectors, \
+		buf) \
+	uk_blkdev_sync_io(blkdev, queue_id, UK_BLKDEV_WRITE, sector, \
+			nb_sectors, buf) \
+
+#define uk_blkdev_sync_read(blkdev,\
+		queue_id, \
+		sector, \
+		nb_sectors, \
+		buf) \
+	uk_blkdev_sync_io(blkdev, queue_id, UK_BLKDEV_READ, sector, \
+			nb_sectors, buf) \
+
+#endif
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/ukblkdev/include/uk/blkdev_core.h b/lib/ukblkdev/include/uk/blkdev_core.h
index 13cd44ba..8bf08cdd 100644
--- a/lib/ukblkdev/include/uk/blkdev_core.h
+++ b/lib/ukblkdev/include/uk/blkdev_core.h
@@ -39,7 +39,8 @@
 #include <uk/list.h>
 #include <uk/config.h>
 #include <uk/blkreq.h>
-#if defined(CONFIG_LIBUKBLKDEV_DISPATCHERTHREADS)
+#if defined(CONFIG_LIBUKBLKDEV_DISPATCHERTHREADS) || \
+	defined(CONFIG_LIBUKBLKDEV_SYNC_IO_BLOCKED_WAITING)
 #include <uk/sched.h>
 #include <uk/semaphore.h>
 #endif
diff --git a/lib/ukblkdev/include/uk/blkreq.h b/lib/ukblkdev/include/uk/blkreq.h
index aa2cf9c4..8d499cf6 100644
--- a/lib/ukblkdev/include/uk/blkreq.h
+++ b/lib/ukblkdev/include/uk/blkreq.h
@@ -105,6 +105,42 @@ struct uk_blkreq {
 };
 
+/**
+ * Initializes a request structure.
+ *
+ * @param req
+ *	The request structure
+ * @param op
+ *	The operation
+ * @param start
+ *	The start sector
+ * @param nb_sectors
+ *	Number of sectors
+ * @param cb
+ *	Request callback
+ * @param cb_cookie
+ *	Request callback parameters
+ * @param aio_buf
+ *	Data buffer
+ **/
+static inline void uk_blkreq_init(struct uk_blkreq *req,
+		enum uk_blkreq_op op, __sector start, __sector nb_sectors,
+		uk_blkreq_event_t cb, void *cb_cookie, void *aio_buf)
+{
+	req->operation = op;
+	req->start_sector = start;
+	req->nb_sectors = nb_sectors;
+	uk_refcount_init(&req->state, UK_BLKDEV_REQ_UNFINISHED);
+	req->cb = cb;
+	req->cb_cookie = cb_cookie;
+	req->aio_buf = aio_buf;
+}
+
+/**
+ * Check if request is finished.
+ **/
+#define uk_blkreq_is_done(req) ((req)->state == UK_BLKDEV_REQ_FINISHED)
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.11.0

_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel
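For illustration, a minimal usage sketch of the new synchronous interface
follows; it is not part of the patch. It assumes a block device that has
already been configured and started, a queue 0 whose completions are driven
elsewhere (e.g. uk_blkdev_queue_finish_reqs() being called from interrupt or
dispatcher context, as the comment on uk_blkdev_sync_io() requires), and a
512-byte sector size and alignment; these assumptions are not taken from the
patch itself.

/* Illustrative sketch only: synchronously read the first sector. */
#include <uk/blkdev.h>
#include <uk/print.h>

#define SECTOR_SIZE 512	/* assumed sector size, not from the patch */

/* One sector worth of data; the alignment requirement is assumed. */
static char buf[SECTOR_SIZE] __attribute__((aligned(SECTOR_SIZE)));

static int read_first_sector(struct uk_blkdev *dev)
{
	int rc;

	/* Expands to uk_blkdev_sync_io(..., UK_BLKDEV_READ, ...) and blocks
	 * on the internal semaphore until the request completes.
	 */
	rc = uk_blkdev_sync_read(dev, 0 /* queue_id */, 0 /* start sector */,
				 1 /* nb_sectors */, buf);
	if (rc < 0)
		uk_pr_err("Synchronous read failed: %d\n", rc);

	return rc;
}

uk_blkdev_sync_write() is used the same way for writes. Both wrappers block
the calling thread, so they must not be invoked from the context that is
responsible for calling uk_blkdev_queue_finish_reqs().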