
Re: [Minios-devel] [UNIKRAFT PATCH v3 06/17] plat/xen/drivers/blk: Configure block device



Reviewed-by: Costin Lupu <costin.lupu@xxxxxxxxx>

On 10/30/19 5:54 PM, Roxana Nicolescu wrote:
> This patch introduces the configuration of the block device, consisting of:
>       - reading the multi-queue-max-queues value provided by the backend;
>       - setting the number of queues to be used (between 1 and max).
> 
> Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
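
For context, here is a minimal consumer-side sketch of the flow this patch
enables. It is not part of the patch: the lib/ukblkdev API names
(uk_blkdev_get(), uk_blkdev_get_info(), uk_blkdev_configure()) are assumed
from the library side rather than shown here, and the clamping policy is only
illustrative:

        #include <errno.h>
        #include <stdint.h>
        #include <uk/blkdev.h>

        static int setup_blkdev(unsigned int devid, uint16_t wanted_queues)
        {
                struct uk_blkdev *bd;
                struct uk_blkdev_info info;
                struct uk_blkdev_conf conf = {0};

                bd = uk_blkdev_get(devid);
                if (!bd)
                        return -ENODEV;

                /* max_queues reports what the backend advertised via
                 * multi-queue-max-queues (read during blkfront_xb_init()).
                 */
                uk_blkdev_get_info(bd, &info);

                /* Clamp the requested count to [1, max] before configuring */
                conf.nb_queues = wanted_queues;
                if (conf.nb_queues > info.max_queues)
                        conf.nb_queues = info.max_queues;
                if (conf.nb_queues == 0)
                        conf.nb_queues = 1;

                /* Ends up in blkfront_configure(), which stores the value
                 * and writes multi-queue-num-queues to Xenstore.
                 */
                return uk_blkdev_configure(bd, &conf);
        }
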
> ---
>  plat/xen/drivers/blk/blkfront.c    | 51 ++++++++++++++++++++++++++++++
>  plat/xen/drivers/blk/blkfront.h    |  2 ++
>  plat/xen/drivers/blk/blkfront_xb.h |  7 +++-
>  plat/xen/drivers/blk/blkfront_xs.c | 47 +++++++++++++++++++++++++++
>  4 files changed, 106 insertions(+), 1 deletion(-)
> 
> diff --git a/plat/xen/drivers/blk/blkfront.c b/plat/xen/drivers/blk/blkfront.c
> index 09302f45..ae98a79c 100644
> --- a/plat/xen/drivers/blk/blkfront.c
> +++ b/plat/xen/drivers/blk/blkfront.c
> @@ -55,6 +55,56 @@
>  static struct uk_alloc *drv_allocator;
>  
>  
> +static int blkfront_configure(struct uk_blkdev *blkdev,
> +             const struct uk_blkdev_conf *conf)
> +{
> +     struct blkfront_dev *dev;
> +     int err = 0;
> +
> +     UK_ASSERT(blkdev != NULL);
> +     UK_ASSERT(conf != NULL);
> +
> +     dev = to_blkfront(blkdev);
> +     dev->nb_queues = conf->nb_queues;
> +     err = blkfront_xb_write_nb_queues(dev);
> +     if (err) {
> +             uk_pr_err("Failed to write nb of queues: %d.\n", err);
> +             goto out;
> +     }
> +
> +     uk_pr_info(DRIVER_NAME": %"PRIu16" configured\n", dev->uid);
> +out:
> +     return err;
> +}
> +
> +static int blkfront_unconfigure(struct uk_blkdev *blkdev)
> +{
> +     struct blkfront_dev *dev;
> +
> +     UK_ASSERT(blkdev != NULL);
> +     dev = to_blkfront(blkdev);
> +
> +     return 0;
> +}
> +
> +static void blkfront_get_info(struct uk_blkdev *blkdev,
> +             struct uk_blkdev_info *dev_info)
> +{
> +     struct blkfront_dev *dev = NULL;
> +
> +     UK_ASSERT(blkdev);
> +     UK_ASSERT(dev_info);
> +
> +     dev = to_blkfront(blkdev);
> +     dev_info->max_queues = dev->nb_queues;
> +}
> +
> +static const struct uk_blkdev_ops blkfront_ops = {
> +     .get_info = blkfront_get_info,
> +     .dev_configure = blkfront_configure,
> +     .dev_unconfigure = blkfront_unconfigure,
> +};
> +
>  /**
>   * Assign callbacks to uk_blkdev
>   */
> @@ -70,6 +120,7 @@ static int blkfront_add_dev(struct xenbus_device *dev)
>               return -ENOMEM;
>  
>       d->xendev = dev;
> +     d->blkdev.dev_ops = &blkfront_ops;
>  
>       /* Xenbus initialization */
>       rc = blkfront_xb_init(d);
> diff --git a/plat/xen/drivers/blk/blkfront.h b/plat/xen/drivers/blk/blkfront.h
> index f00ed41b..5833f826 100644
> --- a/plat/xen/drivers/blk/blkfront.h
> +++ b/plat/xen/drivers/blk/blkfront.h
> @@ -53,6 +53,8 @@ struct blkfront_dev {
>       struct uk_blkdev blkdev;
>       /* Blkfront device number from Xenstore path. */
>       blkif_vdev_t    handle;
> +     /* Number of configured queues used for requests */
> +     uint16_t nb_queues;
>       /* The blkdev identifier */
>       __u16 uid;
>  };
> diff --git a/plat/xen/drivers/blk/blkfront_xb.h b/plat/xen/drivers/blk/blkfront_xb.h
> index 7b62dbeb..668206a0 100644
> --- a/plat/xen/drivers/blk/blkfront_xb.h
> +++ b/plat/xen/drivers/blk/blkfront_xb.h
> @@ -46,7 +46,7 @@
>  
>  /*
>   * Get initial info from the xenstore.
> - * Ex: backend path, handle.
> + * Ex: backend path, handle, max-queues.
>   *
>   * Return 0 on success, a negative errno value on error.
>   */
> @@ -57,4 +57,9 @@ int blkfront_xb_init(struct blkfront_dev *dev);
>   */
>  void blkfront_xb_fini(struct blkfront_dev *dev);
>  
> +/**
> + * Write nb of queues for further use to Xenstore.
> + * Return 0 on success, a negative errno value on error.
> + */
> +int blkfront_xb_write_nb_queues(struct blkfront_dev *dev);
>  #endif /* __BLKFRONT_XB_H__ */
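
For reference, the two Xenstore keys involved sit on opposite sides of the
split-device handshake; the paths, domids and values below are only
illustrative:

        /local/domain/0/backend/vbd/<domid>/<vdev>/multi-queue-max-queues = "4"
        /local/domain/<domid>/device/vbd/<vdev>/multi-queue-num-queues    = "4"

blkfront_xb_get_nb_max_queues() reads the first key from the backend
directory (xendev->otherend), and blkfront_xb_write_nb_queues() writes the
second one under the frontend's own node (xendev->nodename).
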
> diff --git a/plat/xen/drivers/blk/blkfront_xs.c b/plat/xen/drivers/blk/blkfront_xs.c
> index 366c069b..9f44246e 100644
> --- a/plat/xen/drivers/blk/blkfront_xs.c
> +++ b/plat/xen/drivers/blk/blkfront_xs.c
> @@ -66,6 +66,25 @@ out:
>       return err;
>  }
>  
> +static int blkfront_xb_get_nb_max_queues(struct blkfront_dev *dev)
> +{
> +     int err = 0;
> +     struct xenbus_device *xendev;
> +
> +     UK_ASSERT(dev != NULL);
> +     xendev = dev->xendev;
> +
> +     err = xs_scanf(XBT_NIL, xendev->otherend, "multi-queue-max-queues",
> +                             "%"PRIu16,
> +                             &dev->nb_queues);
> +     if (err < 0) {
> +             uk_pr_err("Failed to read multi-queue-max-queues: %d\n", err);
> +             return err;
> +     }
> +
> +     return 0;
> +}
> +
>  int blkfront_xb_init(struct blkfront_dev *dev)
>  {
>       struct xenbus_device *xendev;
> @@ -101,6 +120,11 @@ int blkfront_xb_init(struct blkfront_dev *dev)
>               goto out;
>       }
>  
> +     err = blkfront_xb_get_nb_max_queues(dev);
> +     if (err) {
> +             uk_pr_err("Failed to read multi-queue-max-queues: %d\n", err);
> +             goto out;
> +     }
>  out:
>       return err;
>  }
> @@ -117,3 +141,26 @@ void blkfront_xb_fini(struct blkfront_dev *dev)
>               xendev->otherend = NULL;
>       }
>  }
> +
> +int blkfront_xb_write_nb_queues(struct blkfront_dev *dev)
> +{
> +     int err;
> +     struct xenbus_device *xendev;
> +
> +     UK_ASSERT(dev);
> +
> +     xendev = dev->xendev;
> +     err = xs_printf(XBT_NIL, xendev->nodename,
> +                     "multi-queue-num-queues",
> +                     "%u",
> +                     dev->nb_queues);
> +     if (err < 0) {
> +             uk_pr_err("Failed to write multi-queue-num-queue: %d\n", err);
> +             goto out;
> +     }
> +
> +     err = 0;
> +
> +out:
> +     return err;
> +}
> 
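
One note on the "between 1 and max" semantics: as far as I know, the blkif
protocol lets a backend omit multi-queue-max-queues entirely, in which case
the frontend is expected to fall back to a single queue. A tolerant variant
of the read helper could look like the sketch below (illustration only, not
what this patch does; the patch propagates the xs_scanf error instead):

        static int blkfront_xb_get_nb_max_queues(struct blkfront_dev *dev)
        {
                int err;

                UK_ASSERT(dev != NULL);

                err = xs_scanf(XBT_NIL, dev->xendev->otherend,
                               "multi-queue-max-queues",
                               "%"SCNu16, &dev->nb_queues);
                if (err < 0) {
                        /* Key absent or unreadable: assume the backend
                         * supports only a single queue.
                         */
                        dev->nb_queues = 1;
                }

                return 0;
        }
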

_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel