Re: [Xen-devel] [PATCH 01 of 11] libxc: abstract xenctl_cpumap to just xenctl_map



On Thu, May 31, 2012 at 1:11 PM, Dario Faggioli <raistlin@xxxxxxxx> wrote:
> More specifically:
>  1. replaces xenctl_cpumap with xenctl_map;
>  2. provides bitmap_to_xenctl_map and the reverse;
>  3. re-implements cpumask_to_xenctl_map on top of bitmap_to_xenctl_map,
>    and the reverse;
>
> Other than #3, there are no functional changes. The interface is only
> slightly affected.
>
> This is in preparation for introducing NUMA node maps.
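>
> As a taste of where this is headed, a node-map user of the new generic
> helpers could look like the sketch below (a hypothetical wrapper, not
> part of this patch; nodes_addr() and MAX_NUMNODES are the existing
> xen/nodemask.h primitives, and the body mirrors cpumask_to_xenctl_map
> above):
>
>     int nodemask_to_xenctl_map(struct xenctl_map *xenctl_nodemap,
>                                const nodemask_t *nodemask)
>     {
>         /* Same byte-wise guest copy as for CPU maps, sized by node count. */
>         return bitmap_to_xenctl_map(xenctl_nodemap, nodes_addr(*nodemask),
>                                     MAX_NUMNODES);
>     }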

Dario,

What changes are there in this since the last time you posted this?
Anything other than updating the patch description?

 -George

>
> Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxxxxx>
>
> diff --git a/tools/libxc/xc_cpupool.c b/tools/libxc/xc_cpupool.c
> --- a/tools/libxc/xc_cpupool.c
> +++ b/tools/libxc/xc_cpupool.c
> @@ -90,7 +90,7 @@ xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_
>     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
>     sysctl.u.cpupool_op.cpupool_id = poolid;
>     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
> -    sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
> +    sysctl.u.cpupool_op.cpumap.nr_elems = local_size * 8;
>
>     err = do_sysctl_save(xch, &sysctl);
>
> @@ -184,7 +184,7 @@ xc_cpumap_t xc_cpupool_freeinfo(xc_inter
>     sysctl.cmd = XEN_SYSCTL_cpupool_op;
>     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
>     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
> -    sysctl.u.cpupool_op.cpumap.nr_cpus = mapsize * 8;
> +    sysctl.u.cpupool_op.cpumap.nr_elems = mapsize * 8;
>
>     err = do_sysctl_save(xch, &sysctl);
>
> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -142,7 +142,7 @@ int xc_vcpu_setaffinity(xc_interface *xc
>
>     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
>
> -    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
> +    domctl.u.vcpuaffinity.cpumap.nr_elems = cpusize * 8;
>
>     ret = do_domctl(xch, &domctl);
>
> @@ -182,7 +182,7 @@ int xc_vcpu_getaffinity(xc_interface *xc
>     domctl.u.vcpuaffinity.vcpu = vcpu;
>
>     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
> -    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
> +    domctl.u.vcpuaffinity.cpumap.nr_elems = cpusize * 8;
>
>     ret = do_domctl(xch, &domctl);
>
> diff --git a/tools/libxc/xc_tbuf.c b/tools/libxc/xc_tbuf.c
> --- a/tools/libxc/xc_tbuf.c
> +++ b/tools/libxc/xc_tbuf.c
> @@ -134,7 +134,7 @@ int xc_tbuf_set_cpu_mask(xc_interface *x
>     bitmap_64_to_byte(bytemap, &mask64, sizeof (mask64) * 8);
>
>     set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, bytemap);
> -    sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(bytemap) * 8;
> +    sysctl.u.tbuf_op.cpu_mask.nr_elems = sizeof(bytemap) * 8;
>
>     ret = do_sysctl(xch, &sysctl);
>
> diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
> --- a/xen/arch/x86/cpu/mcheck/mce.c
> +++ b/xen/arch/x86/cpu/mcheck/mce.c
> @@ -1545,8 +1545,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
>             cpumap = &cpu_online_map;
>         else
>         {
> -            ret = xenctl_cpumap_to_cpumask(&cmv,
> -                                           &op->u.mc_inject_v2.cpumap);
> +            ret = xenctl_map_to_cpumask(&cmv, &op->u.mc_inject_v2.cpumap);
>             if ( ret )
>                 break;
>             cpumap = cmv;
> diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
> --- a/xen/arch/x86/platform_hypercall.c
> +++ b/xen/arch/x86/platform_hypercall.c
> @@ -365,7 +365,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
>     {
>         uint32_t cpu;
>         uint64_t idletime, now = NOW();
> -        struct xenctl_cpumap ctlmap;
> +        struct xenctl_map ctlmap;
>         cpumask_var_t cpumap;
>         XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
>         XEN_GUEST_HANDLE(uint64) idletimes;
> @@ -378,11 +378,11 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
>         if ( cpufreq_controller != FREQCTL_dom0_kernel )
>             break;
>
> -        ctlmap.nr_cpus  = op->u.getidletime.cpumap_nr_cpus;
> +        ctlmap.nr_elems  = op->u.getidletime.cpumap_nr_cpus;
>         guest_from_compat_handle(cpumap_bitmap,
>                                  op->u.getidletime.cpumap_bitmap);
>         ctlmap.bitmap.p = cpumap_bitmap.p; /* handle -> handle_64 conversion */
> -        if ( (ret = xenctl_cpumap_to_cpumask(&cpumap, &ctlmap)) != 0 )
> +        if ( (ret = xenctl_map_to_cpumask(&cpumap, &ctlmap)) != 0 )
>             goto out;
>         guest_from_compat_handle(idletimes, op->u.getidletime.idletime);
>
> @@ -401,7 +401,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
>
>         op->u.getidletime.now = now;
>         if ( ret == 0 )
> -            ret = cpumask_to_xenctl_cpumap(&ctlmap, cpumap);
> +            ret = cpumask_to_xenctl_map(&ctlmap, cpumap);
>         free_cpumask_var(cpumap);
>
>         if ( ret == 0 && copy_to_guest(u_xenpf_op, op, 1) )
> diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
> --- a/xen/common/cpupool.c
> +++ b/xen/common/cpupool.c
> @@ -489,7 +489,7 @@ int cpupool_do_sysctl(struct xen_sysctl_
>         op->cpupool_id = c->cpupool_id;
>         op->sched_id = c->sched->sched_id;
>         op->n_dom = c->n_dom;
> -        ret = cpumask_to_xenctl_cpumap(&op->cpumap, c->cpu_valid);
> +        ret = cpumask_to_xenctl_map(&op->cpumap, c->cpu_valid);
>         cpupool_put(c);
>     }
>     break;
> @@ -584,7 +584,7 @@ int cpupool_do_sysctl(struct xen_sysctl_
>
>     case XEN_SYSCTL_CPUPOOL_OP_FREEINFO:
>     {
> -        ret = cpumask_to_xenctl_cpumap(
> +        ret = cpumask_to_xenctl_map(
>             &op->cpumap, &cpupool_free_cpus);
>     }
>     break;
> diff --git a/xen/common/domctl.c b/xen/common/domctl.c
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -31,28 +31,29 @@
>  static DEFINE_SPINLOCK(domctl_lock);
>  DEFINE_SPINLOCK(vcpu_alloc_lock);
>
> -int cpumask_to_xenctl_cpumap(
> -    struct xenctl_cpumap *xenctl_cpumap, const cpumask_t *cpumask)
> +int bitmap_to_xenctl_map(struct xenctl_map *xenctl_map,
> +                         const unsigned long *bitmap,
> +                         unsigned int nbits)
>  {
>     unsigned int guest_bytes, copy_bytes, i;
>     uint8_t zero = 0;
>     int err = 0;
> -    uint8_t *bytemap = xmalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
> +    uint8_t *bytemap = xmalloc_array(uint8_t, (nbits + 7) / 8);
>
>     if ( !bytemap )
>         return -ENOMEM;
>
> -    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
> -    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
> +    guest_bytes = (xenctl_map->nr_elems + 7) / 8;
> +    copy_bytes  = min_t(unsigned int, guest_bytes, (nbits + 7) / 8);
>
> -    bitmap_long_to_byte(bytemap, cpumask_bits(cpumask), nr_cpu_ids);
> +    bitmap_long_to_byte(bytemap, bitmap, nbits);
>
>     if ( copy_bytes != 0 )
> -        if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
> +        if ( copy_to_guest(xenctl_map->bitmap, bytemap, copy_bytes) )
>             err = -EFAULT;
>
>     for ( i = copy_bytes; !err && i < guest_bytes; i++ )
> -        if ( copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1) )
> +        if ( copy_to_guest_offset(xenctl_map->bitmap, i, &zero, 1) )
>             err = -EFAULT;
>
>     xfree(bytemap);
> @@ -60,36 +61,58 @@ int cpumask_to_xenctl_cpumap(
>     return err;
>  }
>
> -int xenctl_cpumap_to_cpumask(
> -    cpumask_var_t *cpumask, const struct xenctl_cpumap *xenctl_cpumap)
> +int xenctl_map_to_bitmap(unsigned long *bitmap,
> +                         const struct xenctl_map *xenctl_map,
> +                         unsigned int nbits)
>  {
>     unsigned int guest_bytes, copy_bytes;
>     int err = 0;
> -    uint8_t *bytemap = xzalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
> +    uint8_t *bytemap = xzalloc_array(uint8_t, (nbits + 7) / 8);
>
>     if ( !bytemap )
>         return -ENOMEM;
>
> -    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
> -    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
> +    guest_bytes = (xenctl_map->nr_elems + 7) / 8;
> +    copy_bytes  = min_t(unsigned int, guest_bytes, (nbits + 7) / 8);
>
>     if ( copy_bytes != 0 )
>     {
> -        if ( copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes) )
> +        if ( copy_from_guest(bytemap, xenctl_map->bitmap, copy_bytes) )
>             err = -EFAULT;
> -        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
> -            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
> +        if ( (xenctl_map->nr_elems & 7) && (guest_bytes <= sizeof(bytemap)) )
> +            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_map->nr_elems & 7));
>     }
>
> -    if ( err )
> -        /* nothing */;
> -    else if ( alloc_cpumask_var(cpumask) )
> -        bitmap_byte_to_long(cpumask_bits(*cpumask), bytemap, nr_cpu_ids);
> +    if ( !err )
> +        bitmap_byte_to_long(bitmap, bytemap, nbits);
> +
> +    xfree(bytemap);
> +
> +    return err;
> +}
> +
> +int cpumask_to_xenctl_map(struct xenctl_map *xenctl_cpumap,
> +                          const cpumask_t *cpumask)
> +{
> +    return bitmap_to_xenctl_map(xenctl_cpumap, cpumask_bits(cpumask),
> +                                nr_cpu_ids);
> +}
> +
> +int xenctl_map_to_cpumask(cpumask_var_t *cpumask,
> +                          const struct xenctl_map *xenctl_cpumap)
> +{
> +    int err = 0;
> +
> +    if ( alloc_cpumask_var(cpumask) ) {
> +        err = xenctl_map_to_bitmap(cpumask_bits(*cpumask), xenctl_cpumap,
> +                                   nr_cpu_ids);
> +        /* In case of error, cleanup is up to us, as the caller won't care! */
> +        if ( err )
> +            free_cpumask_var(*cpumask);
> +    }
>     else
>         err = -ENOMEM;
>
> -    xfree(bytemap);
> -
>     return err;
>  }
>
> @@ -617,7 +640,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
>         {
>             cpumask_var_t new_affinity;
>
> -            ret = xenctl_cpumap_to_cpumask(
> +            ret = xenctl_map_to_cpumask(
>                 &new_affinity, &op->u.vcpuaffinity.cpumap);
>             if ( !ret )
>             {
> @@ -627,7 +650,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
>         }
>         else
>         {
> -            ret = cpumask_to_xenctl_cpumap(
> +            ret = cpumask_to_xenctl_map(
>                 &op->u.vcpuaffinity.cpumap, v->cpu_affinity);
>         }
>
> diff --git a/xen/common/trace.c b/xen/common/trace.c
> --- a/xen/common/trace.c
> +++ b/xen/common/trace.c
> @@ -384,7 +384,7 @@ int tb_control(xen_sysctl_tbuf_op_t *tbc
>     {
>         cpumask_var_t mask;
>
> -        rc = xenctl_cpumap_to_cpumask(&mask, &tbc->cpu_mask);
> +        rc = xenctl_map_to_cpumask(&mask, &tbc->cpu_mask);
>         if ( !rc )
>         {
>             cpumask_copy(&tb_cpu_mask, mask);
> diff --git a/xen/include/public/arch-x86/xen-mca.h b/xen/include/public/arch-x86/xen-mca.h
> --- a/xen/include/public/arch-x86/xen-mca.h
> +++ b/xen/include/public/arch-x86/xen-mca.h
> @@ -414,7 +414,7 @@ struct xen_mc_mceinject {
>
>  struct xen_mc_inject_v2 {
>        uint32_t flags;
> -       struct xenctl_cpumap cpumap;
> +       struct xenctl_map cpumap;
>  };
>  #endif
>
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -283,7 +283,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvc
>  /* XEN_DOMCTL_getvcpuaffinity */
>  struct xen_domctl_vcpuaffinity {
>     uint32_t  vcpu;              /* IN */
> -    struct xenctl_cpumap cpumap; /* IN/OUT */
> +    struct xenctl_map cpumap;    /* IN/OUT */
>  };
>  typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
> diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
> --- a/xen/include/public/sysctl.h
> +++ b/xen/include/public/sysctl.h
> @@ -71,8 +71,8 @@ struct xen_sysctl_tbuf_op {
>  #define XEN_SYSCTL_TBUFOP_disable      5
>     uint32_t cmd;
>     /* IN/OUT variables */
> -    struct xenctl_cpumap cpu_mask;
> -    uint32_t             evt_mask;
> +    struct xenctl_map cpu_mask;
> +    uint32_t          evt_mask;
>     /* OUT variables */
>     uint64_aligned_t buffer_mfn;
>     uint32_t size;  /* Also an IN variable! */
> @@ -531,7 +531,7 @@ struct xen_sysctl_cpupool_op {
>     uint32_t domid;       /* IN: M              */
>     uint32_t cpu;         /* IN: AR             */
>     uint32_t n_dom;       /*            OUT: I  */
> -    struct xenctl_cpumap cpumap; /*     OUT: IF */
> +    struct xenctl_map cpumap;    /*     OUT: IF */
>  };
>  typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
> diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
> --- a/xen/include/public/xen.h
> +++ b/xen/include/public/xen.h
> @@ -822,9 +822,9 @@ typedef uint8_t xen_domain_handle_t[16];
>  #endif
>
>  #ifndef __ASSEMBLY__
> -struct xenctl_cpumap {
> +struct xenctl_map {
>     XEN_GUEST_HANDLE_64(uint8) bitmap;
> -    uint32_t nr_cpus;
> +    uint32_t nr_elems;
>  };
>  #endif
>
> diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
> --- a/xen/include/xen/cpumask.h
> +++ b/xen/include/xen/cpumask.h
> @@ -424,8 +424,8 @@ extern cpumask_t cpu_present_map;
>  #define for_each_present_cpu(cpu)  for_each_cpu(cpu, &cpu_present_map)
>
>  /* Copy to/from cpumap provided by control tools. */
> -struct xenctl_cpumap;
> -int cpumask_to_xenctl_cpumap(struct xenctl_cpumap *, const cpumask_t *);
> -int xenctl_cpumap_to_cpumask(cpumask_var_t *, const struct xenctl_cpumap *);
> +struct xenctl_map;
> +int cpumask_to_xenctl_map(struct xenctl_map *, const cpumask_t *);
> +int xenctl_map_to_cpumask(cpumask_var_t *, const struct xenctl_map *);
>
>  #endif /* __XEN_CPUMASK_H */
> diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
> --- a/xen/include/xlat.lst
> +++ b/xen/include/xlat.lst
> @@ -2,7 +2,7 @@
>  # ! - needs translation
>  # ? - needs checking
>  ?      dom0_vga_console_info           xen.h
> -?      xenctl_cpumap                   xen.h
> +?      xenctl_map                      xen.h
>  ?      mmu_update                      xen.h
>  !      mmuext_op                       xen.h
>  !      start_info                      xen.h
>
