
Re: [Minios-devel] [UNIKRAFT PATCH 5/6] lib/ukblkdev: Synchronous operations



Hi Simon,

Thank you once again. I will post v2 with the changes.

Roxana

On Fri, Jun 14, 2019 at 3:17 PM Simon Kuenzer <simon.kuenzer@xxxxxxxxx> wrote:
On 29.05.19 10:33, Roxana Nicolescu wrote:
> This patch introduces sync operations.
> It requires the use of semaphore in the implementation.
>
> Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
> ---
>   lib/ukblkdev/Config.uk                |  8 +++++
>   lib/ukblkdev/blkdev.c                 | 64 +++++++++++++++++++++++++++++++++++
>   lib/ukblkdev/exportsyms.uk            |  3 ++
>   lib/ukblkdev/include/uk/blkdev.h      | 47 +++++++++++++++++++++++++
>   lib/ukblkdev/include/uk/blkdev_core.h |  3 +-
>   5 files changed, 124 insertions(+), 1 deletion(-)
>
> diff --git a/lib/ukblkdev/Config.uk b/lib/ukblkdev/Config.uk
> index 31b1a1d2..688a44b4 100644
> --- a/lib/ukblkdev/Config.uk
> +++ b/lib/ukblkdev/Config.uk
> @@ -26,4 +26,12 @@ if LIBUKBLKDEV
>                       allocated for each configured queue.
>                       libuksched is required for this option.
>   
> +        config LIBUKBLKDEV_SYNC_IO_BLOCKED_WAITING
> +                bool "Blocked waiting for sync I/O"

Just name it "Synchronous I/O API", since it is the only operation mode
that the API supports for now. If we want to add a busy-waiting version
at some point, we can still rename it later.

> +                default n
> +                select LIBUKSCHED
> +                select LIBUKLOCK
> +                select LIBUKLOCK_SEMAPHORE
> +                help
> +                        Use semaphore for waiting after a request I/O is done.
>   endif
> diff --git a/lib/ukblkdev/blkdev.c b/lib/ukblkdev/blkdev.c
> index 3c58061b..534a9ae1 100644
> --- a/lib/ukblkdev/blkdev.c
> +++ b/lib/ukblkdev/blkdev.c
> @@ -403,3 +403,67 @@ int uk_blkdev_queue_finish_reqs(struct uk_blkdev *dev,
>   
>       return dev->finish_reqs(dev, queue_id);
>   }
> +
> +#if CONFIG_LIBUKBLKDEV_SYNC_IO_BLOCKED_WAITING
> +/**
> + * Used for sending a synchronous request.
> + */
> +struct uk_blkdev_sync_io_request {
> +     struct uk_blkdev_request req;   /* Request structure. */
> +
> +     /* Semaphore used for waiting after the response is done. */
> +     struct uk_semaphore s;
> +};
> +
> +static int __sync_io_callback(struct uk_blkdev_request *req,
> +             void *cookie_callback)
> +{
> +     struct uk_blkdev_sync_io_request *sync_io_req;
> +
> +     UK_ASSERT(req);
> +     UK_ASSERT(cookie_callback);
> +
> +     sync_io_req = (struct uk_blkdev_sync_io_request *)cookie_callback;
> +     uk_semaphore_up(&sync_io_req->s);
> +
> +     return 0;
> +}
> +
> +int uk_blkdev_sync_io(struct uk_blkdev *dev,
> +             uint16_t queue_id,
> +             size_t sector,
> +             __sector nb_sectors,
> +             void *buf,
> +             enum uk_blkdev_op operation)
> +{
> +     struct uk_blkdev_request *req;
> +     int rc = 0;
> +     struct uk_blkdev_sync_io_request sync_io_req;
> +
> +     UK_ASSERT(dev != NULL);
> +     UK_ASSERT(queue_id < CONFIG_LIBUKBLKDEV_MAXNBQUEUES);
> +     UK_ASSERT(dev->_data);
> +     UK_ASSERT(dev->submit_one);
> +     UK_ASSERT(dev->_data->state == UK_BLKDEV_RUNNING);
> +
> +     req = &sync_io_req.req;
> +     req->aio_buf = buf;
> +     req->nb_sectors = nb_sectors;
> +     req->start_sector = sector;
> +     req->operation = operation;
> +     uk_refcount_init(&req->state, UK_BLKDEV_REQ_UNFINISHED);
> +     req->cb = __sync_io_callback;
> +     req->cookie_callback = (void *)&sync_io_req;
> +     uk_semaphore_init(&sync_io_req.s, 0);
> +
> +     rc = uk_blkdev_queue_submit_one(dev, queue_id, req);
> +     if (!uk_blkdev_status_successful(rc)) {

As an optimization, you could add `unlikely()` around the condition. I
think it is unlikely to fail, and wrapping the check lets the compiler
optimize the code layout for the likely (success) path if the
architecture supports it.
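
I.e., something along these lines (untested sketch on top of your hunk):

     rc = uk_blkdev_queue_submit_one(dev, queue_id, req);
     if (unlikely(!uk_blkdev_status_successful(rc))) {
             uk_pr_err("blkdev%"PRIu16"-q%"PRIu16": Failed to submit I/O req: %d\n",
                             dev->_data->id, queue_id, rc);
             return rc;
     }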

> +             uk_pr_err("blkdev%"PRIu16"-q%"PRIu16": Failed to submit I/O req: %d\n",
> +                             dev->_data->id, queue_id, rc);
> +             return rc;
> +     }
> +
> +     uk_semaphore_down(&sync_io_req.s);
> +     return req->result_status;
> +}
> +#endif
> diff --git a/lib/ukblkdev/exportsyms.uk b/lib/ukblkdev/exportsyms.uk
> index 5888be69..0274dae8 100644
> --- a/lib/ukblkdev/exportsyms.uk
> +++ b/lib/ukblkdev/exportsyms.uk
> @@ -19,3 +19,6 @@ uk_blkdev_sectors
>   uk_blkdev_align
>   uk_blkdev_queue_submit_one
>   uk_blkdev_queue_finish_reqs
> +uk_blkdev_sync_io
> +uk_blkdev_read
> +uk_blkdev_write
> diff --git a/lib/ukblkdev/include/uk/blkdev.h b/lib/ukblkdev/include/uk/blkdev.h
> index db6d3b80..ba4c2784 100644
> --- a/lib/ukblkdev/include/uk/blkdev.h
> +++ b/lib/ukblkdev/include/uk/blkdev.h
> @@ -421,6 +421,53 @@ int uk_blkdev_queue_submit_one(struct uk_blkdev *dev,
>   int uk_blkdev_queue_finish_reqs(struct uk_blkdev *dev,
>               uint16_t queue_id);
>   
> +#if CONFIG_LIBUKBLKDEV_SYNC_IO_BLOCKED_WAITING
> +/**
> + * Make a sync io request on a specific queue.
> + *

You should probably document here that `uk_blkdev_queue_finish_reqs()`
has to be called while this function is in use. This can be done via
queue interrupts/events or by calling `uk_blkdev_queue_finish_reqs()`
from a different thread context. Otherwise the calling thread blocks on
the semaphore forever and is never woken up.
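
To illustrate: without queue interrupts/events, a user of this function
would need something like the (hypothetical, untested) polling thread
below running next to the sync calls. The thread function name, the use
of queue 0 and the call to uk_sched_yield() are just assumptions for
the sketch:

     /* Hypothetical polling thread: it repeatedly calls
      * uk_blkdev_queue_finish_reqs() so that completed requests trigger
      * their callbacks, including __sync_io_callback(), which raises the
      * semaphore that uk_blkdev_sync_io() is blocked on.
      */
     static void blkdev_poll_thread(void *arg)
     {
             struct uk_blkdev *dev = (struct uk_blkdev *) arg;

             for (;;) {
                     uk_blkdev_queue_finish_reqs(dev, 0);
                     uk_sched_yield();
             }
     }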

> + * @param dev
> + *   The Unikraft Block Device
> + * @param queue_id
> + *   queue_id
> + * @param sector
> + *   Start Sector
> + * @param nb_sectors
> + * @param buf
> + *   Buffer where data is found
> + * @param op
> + *   Type of operation
> + * @return
> + *   - 0: Success
> + *   - (<0): on error returned by driver
> + */
> +int uk_blkdev_sync_io(struct uk_blkdev *dev,
> +             uint16_t queue_id,
> +             size_t sector,

Please use the __sector datatype for the start address. I would also
prefer to have something in line with the async API: call the parameter
`__sector start_sector`, as you have it in your request token.

I would also move the operation argument before the buffer. This way
the input parameters are grouped together; the buffer can be (depending
on the operation) an input or an output buffer.
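
Roughly what I have in mind (just a sketch of the prototype, untested):

     int uk_blkdev_sync_io(struct uk_blkdev *dev,
                     uint16_t queue_id,
                     __sector start_sector,
                     __sector nb_sectors,
                     enum uk_blkdev_op op,
                     void *buf);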

> +             __sector nb_sectors,
> +             void *buf,
> +             enum uk_blkdev_op op);
> +
> +/*
> + * Wrappers for uk_blkdev_sync_io
> + */
> +#define uk_blkdev_write(blkdev,\
> +             queue_id,       \
> +             sector,         \
> +             nb_sectors,     \
> +             buf)    \
> +     uk_blkdev_sync_io(blkdev, queue_id, sector, nb_sectors, buf,\
> +                     UK_BLKDEV_WRITE) \
> +
> +#define uk_blkdev_read(blkdev,\
> +             queue_id,       \
> +             sector,         \
> +             nb_sectors,     \
> +             buf)    \
> +     uk_blkdev_sync_io(blkdev, queue_id, sector, nb_sectors, buf,\
> +                     UK_BLKDEV_READ) \
> +

I would prefer to call these macros:
uk_blkdev_sync_write() and uk_blkdev_sync_read()
because they are both based on uk_blkdev_sync_io().
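
I.e., roughly (sketch, assuming the argument order I suggested above):

     #define uk_blkdev_sync_write(blkdev, queue_id, sector, nb_sectors, buf) \
             uk_blkdev_sync_io(blkdev, queue_id, sector, nb_sectors, \
                             UK_BLKDEV_WRITE, buf)

     #define uk_blkdev_sync_read(blkdev, queue_id, sector, nb_sectors, buf) \
             uk_blkdev_sync_io(blkdev, queue_id, sector, nb_sectors, \
                             UK_BLKDEV_READ, buf)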

> +#endif
> +
>   #ifdef __cplusplus
>   }
>   #endif
> diff --git a/lib/ukblkdev/include/uk/blkdev_core.h b/lib/ukblkdev/include/uk/blkdev_core.h
> index 309b7a7f..66831f90 100644
> --- a/lib/ukblkdev/include/uk/blkdev_core.h
> +++ b/lib/ukblkdev/include/uk/blkdev_core.h
> @@ -39,7 +39,8 @@
>   #include <uk/list.h>
>   #include <uk/config.h>
>   #include <uk/blkreq.h>
> -#if defined(CONFIG_LIBUKBLKDEV_DISPATCHERTHREADS)
> +#if defined(CONFIG_LIBUKBLKDEV_DISPATCHERTHREADS) || \
> +             defined(CONFIG_LIBUKBLKDEV_SYNC_IO_BLOCKED_WAITING)
>   #include <uk/sched.h>
>   #include <uk/semaphore.h>
>   #endif
>