
Re: [PATCH 11/12] hw/xen: automatically assign device index to block devices



On Mon, 16 Oct 2023 16:19:08 +0100
David Woodhouse <dwmw2@xxxxxxxxxxxxx> wrote:

> From: David Woodhouse <dwmw@xxxxxxxxxxxx>
> 

Is this index user (guest) visible?

> There's no need to force the user to assign a vdev. We can automatically
> assign one, starting at xvda and searching until we find the first disk
> name that's unused.
> 
> This means we can now allow '-drive if=xen,file=xxx' to work without an
> explicit separate -device argument, just like if=virtio.
> 
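If I read the desugaring in blockdev.c below correctly, this means
'-drive if=xen,file=disk.img' should now behave roughly like the explicit
form '-drive if=none,id=drv0,file=disk.img -device xen-disk,drive=drv0',
just with the vdev picked automatically ('drv0' is only an example id
here, not something the patch adds).
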
> Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
> ---
>  blockdev.c           | 15 ++++++++++++---
>  hw/block/xen-block.c | 25 +++++++++++++++++++++++++
>  2 files changed, 37 insertions(+), 3 deletions(-)
> 
> diff --git a/blockdev.c b/blockdev.c
> index 325b7a3bef..9dec4c9c74 100644
> --- a/blockdev.c
> +++ b/blockdev.c
> @@ -255,13 +255,13 @@ void drive_check_orphaned(void)
>           * Ignore default drives, because we create certain default
>           * drives unconditionally, then leave them unclaimed.  Not the
>           * users fault.
> -         * Ignore IF_VIRTIO, because it gets desugared into -device,
> -         * so we can leave failing to -device.
> +         * Ignore IF_VIRTIO or IF_XEN, because it gets desugared into
> +         * -device, so we can leave failing to -device.
>           * Ignore IF_NONE, because leaving unclaimed IF_NONE remains
>           * available for device_add is a feature.
>           */
>          if (dinfo->is_default || dinfo->type == IF_VIRTIO
> -            || dinfo->type == IF_NONE) {
> +            || dinfo->type == IF_XEN || dinfo->type == IF_NONE) {
>              continue;
>          }
>          if (!blk_get_attached_dev(blk)) {
> @@ -977,6 +977,15 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type,
>          qemu_opt_set(devopts, "driver", "virtio-blk", &error_abort);
>          qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"),
>                       &error_abort);
> +    } else if (type == IF_XEN) {
> +        QemuOpts *devopts;
> +        devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0,
> +                                   &error_abort);
> +        qemu_opt_set(devopts, "driver",
> +                     (media == MEDIA_CDROM) ? "xen-cdrom" : "xen-disk",
> +                     &error_abort);
> +        qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"),
> +                     &error_abort);
>      }
>  
>      filename = qemu_opt_get(legacy_opts, "file");
> diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c
> index 9262338535..20fa783cbe 100644
> --- a/hw/block/xen-block.c
> +++ b/hw/block/xen-block.c
> @@ -34,6 +34,31 @@ static char *xen_block_get_name(XenDevice *xendev, Error **errp)
>      XenBlockDevice *blockdev = XEN_BLOCK_DEVICE(xendev);
>      XenBlockVdev *vdev = &blockdev->props.vdev;
>  
> +    if (blockdev->props.vdev.type == XEN_BLOCK_VDEV_TYPE_INVALID) {
> +        char name[11];
> +        int disk = 0;
> +        unsigned long idx;
> +
> +        /* Find an unoccupied device name */
> +        while (disk < (1 << 20)) {
> +            if (disk < (1 << 4)) {
> +                idx = (202 << 8) | (disk << 4);
> +            } else {
> +                idx = (1 << 28) | (disk << 8);
> +            }
> +            snprintf(name, sizeof(name), "%lu", idx);
> +            if (!xen_backend_exists("qdisk", name)) {
> +                vdev->type = XEN_BLOCK_VDEV_TYPE_XVD;
> +                vdev->partition = 0;
> +                vdev->disk = disk;
> +                vdev->number = idx;
> +                return g_strdup(name);
> +            }
> +            disk++;
> +        }
> +        error_setg(errp, "cannot find device vdev for block device");
> +        return NULL;
> +    }
>      return g_strdup_printf("%lu", vdev->number);
>  }
>  
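
For anyone else checking the arithmetic in xen_block_get_name(): my
understanding of the encoding (classic xvd numbers use major 202 with
minor = disk * 16 + partition, and disks past the 16th fall back to the
extended 1 << 28 scheme) matches what the loop does. A standalone sketch
of just that calculation, under those assumptions (the function name and
the main() harness are mine, purely for illustration):

#include <stdio.h>

/*
 * Sketch only -- mirrors the device-number arithmetic used in
 * xen_block_get_name() above.  The first 16 disks use the classic
 * xvd encoding (major 202, minor = disk * 16 + partition); anything
 * beyond that uses the extended scheme (1 << 28 | disk << 8 | partition),
 * with partition 0 in both cases since only whole disks are auto-assigned.
 */
static unsigned long xvd_number(unsigned int disk)
{
    if (disk < (1 << 4)) {
        return (202UL << 8) | (disk << 4);           /* xvda .. xvdp */
    }
    return (1UL << 28) | ((unsigned long)disk << 8); /* xvdq onwards */
}

int main(void)
{
    /* prints 51712 (xvda), 51728 (xvdb), 268439552 (17th disk) */
    printf("%lu %lu %lu\n", xvd_number(0), xvd_number(1), xvd_number(16));
    return 0;
}

i.e. the same decimal strings the loop ends up passing to
xen_backend_exists("qdisk", name).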




 

