
Re: [Xen-devel] [RFC XEN PATCH 12/16] tools/libxl: build qemu options from xl vNVDIMM configs



On Mon, Oct 10, 2016 at 08:32:31AM +0800, Haozhong Zhang wrote:
> For xl vNVDIMM configs
>   vnvdimms = [ '/path/to/pmem0', '/path/to/pmem1', ... ]
> 
> the following qemu options are built
>   -machine <existing options>,nvdimm
>   -m <existing options>,slots=$NR_SLOTS,maxmem=$MEM_SIZE
>   -object memory-backend-xen,id=mem1,size=$PMEM0_SIZE,mem-path=/path/to/pmem0
>   -device nvdimm,id=nvdimm1,memdev=mem1
>   -object memory-backend-xen,id=mem2,size=$PMEM1_SIZE,mem-path=/path/to/pmem1
>   -device nvdimm,id=nvdimm2,memdev=mem2
>   ...
> where
> * NR_SLOTS is the number of entries in vnvdimms + 1,
> * MEM_SIZE is the total size of all RAM and NVDIMM devices,
> * PMEM#_SIZE is the size of the host pmem device/file '/path/to/pmem#'.
> 
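
Just to check I'm reading the sizing correctly: with, say, two 4G pmem
files and 2G of guest RAM, that should come out as slots=3 and
maxmem=10G (2G of RAM plus 2 * 4G of NVDIMM), right?
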
> Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
> ---
> Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
> Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
> ---
>  tools/libxl/libxl_dm.c      | 113 +++++++++++++++++++++++++++++++++++++++++++-
>  tools/libxl/libxl_types.idl |   8 ++++
>  tools/libxl/xl_cmdimpl.c    |  16 +++++++

You probably also want to document this new vnvdimms option in the xl.cfg manpage.
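
Something along these lines, perhaps (just a rough sketch of the wording
and POD markup on my part, adjust as you see fit):

  =item B<vnvdimms=[ "PATH", "PATH", ... ]>

  Specifies a list of host pmem block devices or regular files to be
  exposed to the guest as virtual NVDIMM devices. The size of each
  virtual NVDIMM is taken from the size of the corresponding host
  device or file.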

>  3 files changed, 135 insertions(+), 2 deletions(-)
> 
> diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c
> index ad366a8..6b8c019 100644
> --- a/tools/libxl/libxl_dm.c
> +++ b/tools/libxl/libxl_dm.c
> @@ -24,6 +24,10 @@
>  #include <sys/types.h>
>  #include <pwd.h>
>  
> +#if defined(__linux__)
> +#include <linux/fs.h>
> +#endif
> +
>  static const char *libxl_tapif_script(libxl__gc *gc)
>  {
>  #if defined(__linux__) || defined(__FreeBSD__)
> @@ -905,6 +909,86 @@ static char *qemu_disk_ide_drive_string(libxl__gc *gc, const char *target_path,
>      return drive;
>  }
>  
> +#if defined(__linux__)
> +
> +static uint64_t libxl__build_dm_vnvdimm_args(libxl__gc *gc, flexarray_t *dm_args,
> +                                             struct libxl_device_vnvdimm *dev,
> +                                             int dev_no)
> +{
> +    int fd, rc;
> +    struct stat st;
> +    uint64_t size = 0;
> +    char *arg;
> +
> +    fd = open(dev->file, O_RDONLY);
> +    if (fd < 0) {
> +        LOG(ERROR, "failed to open file %s: %s",
> +            dev->file, strerror(errno));
> +        goto out;
> +    }
> +
> +    if (stat(dev->file, &st)) {
> +        LOG(ERROR, "failed to get status of file %s: %s",
> +            dev->file, strerror(errno));
> +        goto out_fclose;
> +    }
> +
> +    switch (st.st_mode & S_IFMT) {
> +    case S_IFBLK:
> +        rc = ioctl(fd, BLKGETSIZE64, &size);
> +        if (rc == -1) {
> +            LOG(ERROR, "failed to get size of block device %s: %s",
> +                dev->file, strerror(errno));
> +            size = 0;
> +        }
> +        break;
> +
> +    case S_IFREG:
> +        size = st.st_size;
> +        break;
> +
> +    default:
> +        LOG(ERROR, "%s is not a block device or regular file", dev->file);
> +        break;
> +    }
> +
> +    if (!size)
> +        goto out_fclose;
> +
> +    flexarray_append(dm_args, "-object");
> +    arg = GCSPRINTF("memory-backend-xen,id=mem%d,size=%"PRIu64",mem-path=%s",
> +                    dev_no + 1, size, dev->file);
> +    flexarray_append(dm_args, arg);
> +
> +    flexarray_append(dm_args, "-device");
> +    arg = GCSPRINTF("nvdimm,id=nvdimm%d,memdev=mem%d", dev_no + 1, dev_no + 1);
> +    flexarray_append(dm_args, arg);
> +
> + out_fclose:
> +    close(fd);
> + out:
> +    return size;
> +}
> +
> +static uint64_t libxl__build_dm_vnvdimms_args(
> +    libxl__gc *gc, flexarray_t *dm_args,
> +    struct libxl_device_vnvdimm *vnvdimms, int num_vnvdimms)
> +{
> +    uint64_t total_size = 0, size;
> +    int i;

unsigned int
> +

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel