
Re: [Xen-devel] [PATCH v4 12/15] libxl: get and set soft affinity



On Fri, 2013-11-22 at 19:58 +0100, Dario Faggioli wrote:
> Make space for two new cpumap-s, one in vcpu_info (for getting
> soft affinity) and build_info (for setting it) and amend the
> API for setting vCPU affinity.
> 
> libxl_set_vcpuaffinity() now takes two cpumaps, one for hard
> and one for soft affinity (LIBXL_API_VERSION is exploited to
> retain source level backward compatibility). Either of the
> two cpumaps can be NULL, in which case only the affinity
> corresponding to the non-NULL cpumap will be affected.
> 
> Getting soft affinity happens indirectly, via `xl vcpu-list'
> (as it is already for hard affinity).
> 
> This commit also introduces some logic to check whether the
> affinity which will be used by Xen to schedule the vCPU(s)
> does actually match with the cpumaps provided. In fact, we
> want to allow every possible combination of hard and soft
> affinity to be set, but we warn the user upon particularly
> weird situations (e.g., hard and soft being disjoint sets
> of pCPUs).
> 
> This is the first change breaking the libxl ABI, so it bumps
> the MAJOR.
> 
> Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
> ---
> Changes from v3:
>  * only introduce one LIBXL_HAVE_ symbol for soft affinity,
>    as requested during review;
>  * use LIBXL_API_VERSION instead of having multiple version
>    of the same function, as suggested during review;
>  * use libxl_get_nr_cpus() rather than libxl_get_cputopology(),
>    as suggested during review;
>  * use LOGE() instead of LIBXL__LOG_ERRNO(), as requested
>    during review;
>  * kill the flags and use just one _set_vcpuaffinity()
>    function with two cpumaps, allowing either of them to
>    be NULL, as suggested during review;
>  * avoid overflowing the bitmaps in libxl_bitmap_equal(),
>    as suggested during review.
> 
> Changes from v2:
>  * interface completely redesigned, as discussed during
>    review.
> ---
>  tools/libxl/Makefile        |    2 +
>  tools/libxl/libxl.c         |   79 ++++++++++++++++++++++++++++++++++++++-----
>  tools/libxl/libxl.h         |   38 ++++++++++++++++++++-
>  tools/libxl/libxl_create.c  |    6 +++
>  tools/libxl/libxl_dom.c     |    3 +-
>  tools/libxl/libxl_types.idl |    4 ++
>  tools/libxl/libxl_utils.h   |   25 +++++++++++++-
>  tools/libxl/xl_cmdimpl.c    |    6 ++-
>  8 files changed, 145 insertions(+), 18 deletions(-)
> 
> diff --git a/tools/libxl/Makefile b/tools/libxl/Makefile
> index d8495bb..1410c44 100644
> --- a/tools/libxl/Makefile
> +++ b/tools/libxl/Makefile
> @@ -5,7 +5,7 @@
>  XEN_ROOT = $(CURDIR)/../..
>  include $(XEN_ROOT)/tools/Rules.mk
>  
> -MAJOR = 4.3
> +MAJOR = 4.4
>  MINOR = 0
>  
>  XLUMAJOR = 4.3
> diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
> index 961d55e..4b871d7 100644
> --- a/tools/libxl/libxl.c
> +++ b/tools/libxl/libxl.c
> @@ -4538,6 +4538,8 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
>      for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
>          if (libxl_cpu_bitmap_alloc(ctx, &ptr->cpumap, 0))
>              return NULL;
> +        if (libxl_cpu_bitmap_alloc(ctx, &ptr->cpumap_soft, 0))
> +            return NULL;

Leaks ptr and ptr->cpumap...
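
Something along these lines would avoid that (untested sketch; I'm
assuming the array base is kept in a local, 'ret' here, and that the
entries are calloc'd, so disposing a not-yet-filled one is safe):

    for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
        if (libxl_cpu_bitmap_alloc(ctx, &ptr->cpumap, 0))
            goto err;
        if (libxl_cpu_bitmap_alloc(ctx, &ptr->cpumap_soft, 0))
            goto err;
        ...
    }
    return ret;

 err:
    /* frees the bitmaps of the entries initialised so far, then the array */
    libxl_vcpuinfo_list_free(ret, *nb_vcpu + 1);
    return NULL;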

>          if (xc_vcpu_getinfo(ctx->xch, domid, *nb_vcpu, &vcpuinfo) == -1) {
>              LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu info");
>              return NULL;
> @@ -4548,6 +4550,12 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
>              LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity");
>              return NULL;
>          }
> +        if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu,
> +                                XEN_VCPUAFFINITY_SOFT,
> +                                ptr->cpumap_soft.map) == -1) {
> +            LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity");
> +            return NULL;
> +        }
>          ptr->vcpuid = *nb_vcpu;
>          ptr->cpu = vcpuinfo.cpu;
>          ptr->online = !!vcpuinfo.online;
> @@ -4559,28 +4567,81 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
>  }
>  
>  int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
> -                           libxl_bitmap *cpumap)
> +                           const libxl_bitmap *cpumap_hard,
> +                           const libxl_bitmap *cpumap_soft)
>  {
> -    if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map,
> -                            XEN_VCPUAFFINITY_HARD, NULL)) {
> -        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu affinity");
> -        return ERROR_FAIL;
> +    GC_INIT(ctx);
> +    libxl_bitmap ecpumap;
> +    int nr_cpus = 0, rc;
> +
> +    if (!cpumap_hard && !cpumap_soft)
> +        return ERROR_INVAL;
> +
> +    rc = libxl_cpu_bitmap_alloc(ctx, &ecpumap, 0);
> +    if (rc)
> +        return rc;
> +
> +    nr_cpus = libxl_get_nr_cpus(ctx);
> +    if (nr_cpus <= 0) {
> +        rc = ERROR_FAIL;
> +        goto out;
>      }
> +
> +    if (cpumap_hard && xc_vcpu_setaffinity(ctx->xch, domid, vcpuid,
> +                                           cpumap_hard->map,
> +                                           XEN_VCPUAFFINITY_HARD,
> +                                           ecpumap.map)) {
> +        LOGE(ERROR, "setting vcpu hard affinity");
> +        rc = ERROR_FAIL;
> +        goto out;
> +    }
> +    if (cpumap_hard && !libxl_bitmap_equal(cpumap_hard, &ecpumap, nr_cpus))
> +        LOG(DEBUG, "New hard affinity for vcpu %d contains unreachable cpus",
> +            vcpuid);
> +
> +    if (cpumap_soft && xc_vcpu_setaffinity(ctx->xch, domid, vcpuid,
> +                                           cpumap_soft->map,
> +                                           XEN_VCPUAFFINITY_SOFT,
> +                                           ecpumap.map)) {
> +        LOGE(ERROR, "setting vcpu soft affinity");
> +        rc = ERROR_FAIL;
> +        goto out;
> +    }
> +    if (cpumap_soft && !libxl_bitmap_equal(cpumap_soft, &ecpumap, nr_cpus))
> +        LOG(DEBUG, "New soft affinity for vcpu %d contains unreachable cpus",
> +            vcpuid);
> +
> +    /*
> +     * When setting hard affinity, it is guaranteed that the result will not
> +     * be empty, so we need to check for that only if soft.
> +     */
> +    if (cpumap_soft && libxl_bitmap_is_empty(&ecpumap))
> +        LOG(WARN, "New soft affinity for vcpu %d has only unreachable cpus."
> +            " Only hard affinity will be considered for scheduling", vcpuid);
> +
> +    rc = 0;
> + out:
> +    libxl_bitmap_dispose(&ecpumap);
> +    GC_FREE;
>      return 0;

ITYM return rc.

>  }
>  
>  int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid,
> -                               unsigned int max_vcpus, libxl_bitmap *cpumap)
> +                               unsigned int max_vcpus,
> +                               const libxl_bitmap *cpumap_hard,
> +                               const libxl_bitmap *cpumap_soft)
>  {
> +    GC_INIT(ctx);
>      int i, rc = 0;
>  
>      for (i = 0; i < max_vcpus; i++) {
> -        if (libxl_set_vcpuaffinity(ctx, domid, i, cpumap)) {
> -            LIBXL__LOG(ctx, LIBXL__LOG_WARNING,
> -                       "failed to set affinity for %d", i);
> +        if (libxl_set_vcpuaffinity(ctx, domid, i, cpumap_hard, cpumap_soft)) {
> +            LOG(WARN, "failed to set affinity for %d", i);
>              rc = ERROR_FAIL;
>          }
>      }
> +
> +    GC_FREE;
>      return rc;
>  }
>  
> diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
> index 2fab5ba..9864927 100644
> --- a/tools/libxl/libxl.h
> +++ b/tools/libxl/libxl.h
> @@ -82,6 +82,14 @@
>  #define LIBXL_HAVE_DOMAIN_NODEAFFINITY 1
>  
>  /*
> + * LIBXL_HAVE_SOFT_AFFINITY indicates that a 'cpumap_soft'
> + * field (of libxl_bitmap type) is present in both
> + * libxl_domain_build_info and libxl_vcpuinfo, containing
> + * the soft affinity for the vcpu.

"the vcpu", in the build_info case I guess it applies to all vcpus?


> + */
> +#define LIBXL_HAVE_SOFT_AFFINITY 1
> +
> +/*
>   * LIBXL_HAVE_BUILDINFO_HVM_VENDOR_DEVICE indicates that the
>   * libxl_vendor_device field is present in the hvm sections of
>   * libxl_domain_build_info. This field tells libxl which
> @@ -994,9 +1002,35 @@ int libxl_userdata_retrieve(libxl_ctx *ctx, uint32_t domid,
>  
>  int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo);
>  int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
> -                           libxl_bitmap *cpumap);
> +                           const libxl_bitmap *cpumap_hard,
> +                           const libxl_bitmap *cpumap_soft);
>  int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid,
> -                               unsigned int max_vcpus, libxl_bitmap *cpumap);
> +                               unsigned int max_vcpus,
> +                               const libxl_bitmap *cpumap_hard,
> +                               const libxl_bitmap *cpumap_soft);
> +
> +#if defined (LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040400
> +
> +static inline
> +int libxl_set_vcpuaffinity_0x040200(libxl_ctx *ctx, uint32_t domid,
> +                                    uint32_t vcpuid, libxl_bitmap *cpumap)
> +{
> +    return libxl_set_vcpuaffinity(ctx, domid, vcpuid, cpumap, NULL);
> +}
> +
> +static inline
> +int libxl_set_vcpuaffinity_all_0x040200(libxl_ctx *ctx, uint32_t domid,
> +                                        unsigned int max_vcpus,
> +                                        libxl_bitmap *cpumap)
> +{
> +    return libxl_set_vcpuaffinity_all(ctx, domid, max_vcpus, cpumap, NULL);
> +}
> +
> +#define libxl_set_vcpuaffinity libxl_set_vcpuaffinity_0x040200
> +#define libxl_set_vcpuaffinity_all libxl_set_vcpuaffinity_all_0x040200

This looks correct, thanks. It could also have just been #defined, since
the inline wrapper is only really needed in the
libxl_domain_create_restore_0x040200 case, where there is a local
variable.
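
i.e. something like (untested; this relies on the standard preprocessor
rule that a function-like macro is not re-expanded inside its own
expansion, so the "recursive" looking defines are fine):

#if defined (LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040400
#define libxl_set_vcpuaffinity(ctx, domid, vcpuid, map) \
    libxl_set_vcpuaffinity((ctx), (domid), (vcpuid), (map), NULL)
#define libxl_set_vcpuaffinity_all(ctx, domid, max_vcpus, map) \
    libxl_set_vcpuaffinity_all((ctx), (domid), (max_vcpus), (map), NULL)
#endif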
> +
> +#endif
> +
>  int libxl_domain_set_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
>                                    libxl_bitmap *nodemap);
>  int libxl_domain_get_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
> diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
> index fe7ba0d..58f80df 100644
> --- a/tools/libxl/libxl_create.c
> +++ b/tools/libxl/libxl_create.c
> @@ -193,6 +193,12 @@ int libxl__domain_build_info_setdefault(libxl__gc *gc,
>          libxl_bitmap_set_any(&b_info->cpumap);
>      }
>  
> +    if (!b_info->cpumap_soft.size) {
> +        if (libxl_cpu_bitmap_alloc(CTX, &b_info->cpumap_soft, 0))
> +            return ERROR_FAIL;
> +        libxl_bitmap_set_any(&b_info->cpumap_soft);
> +    }
> +
>      libxl_defbool_setdefault(&b_info->numa_placement, true);
>  
>      if (!b_info->nodemap.size) {
> diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
> index 72489f8..4bfed60 100644
> --- a/tools/libxl/libxl_dom.c
> +++ b/tools/libxl/libxl_dom.c
> @@ -236,7 +236,8 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
>              return rc;
>      }
>      libxl_domain_set_nodeaffinity(ctx, domid, &info->nodemap);
> -    libxl_set_vcpuaffinity_all(ctx, domid, info->max_vcpus, &info->cpumap);
> +    libxl_set_vcpuaffinity_all(ctx, domid, info->max_vcpus, &info->cpumap,
> +                               &info->cpumap_soft);
>  
>      xc_domain_setmaxmem(ctx->xch, domid, info->target_memkb + LIBXL_MAXMEM_CONSTANT);
>      xs_domid = xs_read(ctx->xsh, XBT_NULL, "/tool/xenstored/domid", NULL);
> diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
> index cba8eff..48699c9 100644
> --- a/tools/libxl/libxl_types.idl
> +++ b/tools/libxl/libxl_types.idl
> @@ -298,6 +298,7 @@ libxl_domain_build_info = Struct("domain_build_info",[
>      ("max_vcpus",       integer),
>      ("avail_vcpus",     libxl_bitmap),
>      ("cpumap",          libxl_bitmap),
> +    ("cpumap_soft",     libxl_bitmap),
>      ("nodemap",         libxl_bitmap),
>      ("numa_placement",  libxl_defbool),
>      ("tsc_mode",        libxl_tsc_mode),
> @@ -510,7 +511,8 @@ libxl_vcpuinfo = Struct("vcpuinfo", [
>      ("blocked", bool),
>      ("running", bool),
>      ("vcpu_time", uint64), # total vcpu time ran (ns)
> -    ("cpumap", libxl_bitmap), # current cpu's affinities
> +    ("cpumap", libxl_bitmap), # current hard cpu affinity
> +    ("cpumap_soft", libxl_bitmap), # current soft cpu affinity
>      ], dir=DIR_OUT)
>  
>  libxl_physinfo = Struct("physinfo", [
> diff --git a/tools/libxl/libxl_utils.h b/tools/libxl/libxl_utils.h
> index b11cf28..c205dd7 100644
> --- a/tools/libxl/libxl_utils.h
> +++ b/tools/libxl/libxl_utils.h
> @@ -90,7 +90,7 @@ static inline void libxl_bitmap_set_none(libxl_bitmap *bitmap)
>  {
>      memset(bitmap->map, 0, bitmap->size);
>  }
> -static inline int libxl_bitmap_cpu_valid(libxl_bitmap *bitmap, int bit)
> +static inline int libxl_bitmap_valid(const libxl_bitmap *bitmap, int bit)

This name suggests to me that it checks whether the entire bitmap is
somehow valid.

..._bit_valid? (or valid_bit).

An alternative might be for the places which use this to assume that any
bit past the end of the mask must be zero (or 1, but I think 0 makes
more sense).
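
e.g. something like this in libxl_bitmap_equal() (rough sketch; the
helper name is made up and I'm assuming libxl_bitmap_test() takes a
const bitmap):

    static inline int bitmap_test_checked(const libxl_bitmap *b, int bit)
    {
        /* any bit beyond the end of the map reads as 0 */
        return bit < b->size * 8 ? libxl_bitmap_test(b, bit) : 0;
    }

    static inline int libxl_bitmap_equal(const libxl_bitmap *ba,
                                         const libxl_bitmap *bb,
                                         int nr_bits)
    {
        int i;

        /* Only check nr_bits (all bits of the larger map if <= 0) */
        if (nr_bits <= 0)
            nr_bits = (ba->size > bb->size ? ba->size : bb->size) * 8;
        for (i = 0; i < nr_bits; i++)
            if (bitmap_test_checked(ba, i) != bitmap_test_checked(bb, i))
                return 0;
        return 1;
    }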

>  {
>      return bit >= 0 && bit < (bitmap->size * 8);
>  }
> @@ -98,6 +98,29 @@ static inline int libxl_bitmap_cpu_valid(libxl_bitmap *bitmap, int bit)
>  #define libxl_for_each_set_bit(v, m) for (v = 0; v < (m).size * 8; v++) \
>                                               if (libxl_bitmap_test(&(m), v))
>  
> +static inline int libxl_bitmap_equal(const libxl_bitmap *ba,
> +                                     const libxl_bitmap *bb,
> +                                     int nr_bits)
> +{
> +    int i;
> +
> +    /* Only check nr_bits (all bits if <= 0) */
> +    nr_bits = (nr_bits <= 0) ? ba->size * 8 : nr_bits;
> +    for (i = 0; i < nr_bits; i++) {
> +        /* If overflowing one of the bitmaps, we call them different */
> +        if (!libxl_bitmap_valid(ba, i) || !libxl_bitmap_valid(bb, i))
> +            return 0;
> +        if (libxl_bitmap_test(ba, i) != libxl_bitmap_test(bb, i))
> +            return 0;
> +    }
> +    return 1;
> +}
> +
> +static inline int libxl_bitmap_cpu_valid(libxl_bitmap *cpumap, int cpu)
> +{
> +    return libxl_bitmap_valid(cpumap, cpu);
> +}
> +
>  int libxl_cpu_bitmap_alloc(libxl_ctx *ctx,
>                             libxl_bitmap *cpumap,
>                             int max_cpus);
> diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
> index 7b4d058..17fffdd 100644
> --- a/tools/libxl/xl_cmdimpl.c
> +++ b/tools/libxl/xl_cmdimpl.c
> @@ -2222,7 +2222,7 @@ start:
>              } else {
>                  libxl_bitmap_set_any(&vcpu_cpumap);
>              }
> -            if (libxl_set_vcpuaffinity(ctx, domid, i, &vcpu_cpumap)) {
> +            if (libxl_set_vcpuaffinity(ctx, domid, i, &vcpu_cpumap, NULL)) {
>                  fprintf(stderr, "setting affinity failed on vcpu `%d'.\n", i);
>                  libxl_bitmap_dispose(&vcpu_cpumap);
>                  free(vcpu_to_pcpu);
> @@ -4630,7 +4630,7 @@ static int vcpupin(uint32_t domid, const char *vcpu, char *cpu)
>      }
>  
>      if (vcpuid != -1) {
> -        if (libxl_set_vcpuaffinity(ctx, domid, vcpuid, &cpumap) == -1) {
> +        if (libxl_set_vcpuaffinity(ctx, domid, vcpuid, &cpumap, NULL)) {
>              fprintf(stderr, "Could not set affinity for vcpu `%u'.\n", vcpuid);
>              goto out;
>          }
> @@ -4642,7 +4642,7 @@ static int vcpupin(uint32_t domid, const char *vcpu, char *cpu)
>          }
>          for (i = 0; i < nb_vcpu; i++) {
>              if (libxl_set_vcpuaffinity(ctx, domid, vcpuinfo[i].vcpuid,
> -                                       &cpumap) == -1) {
> +                                       &cpumap, NULL)) {
>                  fprintf(stderr, "libxl_set_vcpuaffinity failed"
>                                  " on vcpu `%u'.\n", vcpuinfo[i].vcpuid);
>              }
> 


