[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 12/16] libxl: get and set soft affinity
which basically means making space for a new cpumap in both vcpu_info (for getting soft affinity) and build_info (for setting it), along with providing the get/set functions, and wiring them to the proper xc calls. Interface is as follows: * libxl_{get,set}_vcpuaffinity() deals with hard affinity, as it always has happened; * libxl_{get,set}_vcpuaffinity_soft() deals with soft affinity. *_set_* functions include some logic for checking whether the affinity that would indeed be used matches the one requested by the caller, and printing some warnings if that is not the case. That is because, despite what the user asks, for instance, for soft affinity, the scheduler only considers running a vCPU where its hard affinity and cpupool mandate. So, although we want to allow any possible combinations (e.g., we do not want to error out if hard affinity and soft affinity are disjoint), we at very least print some warnings, hoping to help the sysadmin to figure out what is really going on. Also, as this apparently is the first change being checked in that breaks libxl ABI, bump MAJOR to 4.4. Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx> --- tools/libxl/Makefile | 1 tools/libxl/libxl.c | 206 ++++++++++++++++++++++++++++++++++++++++--- tools/libxl/libxl.h | 23 +++++ tools/libxl/libxl_create.c | 6 + tools/libxl/libxl_types.idl | 4 + tools/libxl/libxl_utils.h | 13 +++ 6 files changed, 239 insertions(+), 14 deletions(-) diff --git a/tools/libxl/Makefile b/tools/libxl/Makefile index cf214bb..b7f39bd 100644 --- a/tools/libxl/Makefile +++ b/tools/libxl/Makefile @@ -5,6 +5,7 @@ XEN_ROOT = $(CURDIR)/../.. 
include $(XEN_ROOT)/tools/Rules.mk +#MAJOR = 4.4 MAJOR = 4.3 MINOR = 0 diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c index 0de1112..d32414d 100644 --- a/tools/libxl/libxl.c +++ b/tools/libxl/libxl.c @@ -4208,12 +4208,24 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid, LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpumap"); return NULL; } + if (libxl_cpu_bitmap_alloc(ctx, &ptr->cpumap_soft, 0)) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpumap_soft"); + return NULL; + } if (xc_vcpu_getinfo(ctx->xch, domid, *nb_vcpu, &vcpuinfo) == -1) { LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu info"); return NULL; } - if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu, ptr->cpumap.map) == -1) { - LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity"); + if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu, + ptr->cpumap.map) == -1) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, + "getting vcpu hard affinity"); + return NULL; + } + if (xc_vcpu_getaffinity_soft(ctx->xch, domid, *nb_vcpu, + ptr->cpumap_soft.map) == -1) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, + "getting vcpu soft affinity"); return NULL; } ptr->vcpuid = *nb_vcpu; @@ -4226,14 +4238,160 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid, return ret; } +static int libxl__set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, + uint32_t vcpuid, const libxl_bitmap *cpumap, + uint32_t flags, libxl_bitmap *ecpumap) +{ + if (flags & XEN_VCPUAFFINITY_HARD) { + if (xc_vcpu_setaffinity_hard(ctx->xch, domid, vcpuid, + cpumap->map, ecpumap->map)) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, + "failed to set hard affinity for %d", + vcpuid); + return ERROR_FAIL; + } + } else if (flags & XEN_VCPUAFFINITY_SOFT) { + if (xc_vcpu_setaffinity_soft(ctx->xch, domid, vcpuid, + cpumap->map, ecpumap->map)) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, + "failed to set soft affinity for %d", + vcpuid); + return ERROR_FAIL; + } + } else + return ERROR_INVAL; + + return 0; +} + 
+static int libxl__get_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, + uint32_t vcpuid, libxl_bitmap *cpumap, + uint32_t flags) +{ + if (flags & XEN_VCPUAFFINITY_HARD) { + if (xc_vcpu_getaffinity_hard(ctx->xch, domid, vcpuid, cpumap->map)) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, + "failed to get hard affinity for %d", + vcpuid); + return ERROR_FAIL; + } + } else if (flags & XEN_VCPUAFFINITY_SOFT) { + if (xc_vcpu_getaffinity_soft(ctx->xch, domid, vcpuid, cpumap->map)) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, + "failed to get soft affinity for %d", + vcpuid); + return ERROR_FAIL; + } + } else + return ERROR_INVAL; + + return 0; +} + int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid, libxl_bitmap *cpumap) { - if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map)) { - LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu affinity"); - return ERROR_FAIL; + libxl_bitmap ecpumap, scpumap; + int rc; + + libxl_bitmap_init(&ecpumap); + libxl_bitmap_init(&scpumap); + + rc = libxl_cpu_bitmap_alloc(ctx, &ecpumap, 0); + if (rc) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating ecpumap"); + goto out; } - return 0; + + /* + * Set the new hard affinity and check how it went. If we were + * setting it to "all", and no error occurred, there is no chance + * we are breaking the soft affinity, so we can just leave. + */ + rc = libxl__set_vcpuaffinity(ctx, domid, vcpuid, cpumap, + XEN_VCPUAFFINITY_HARD, &ecpumap); + if (rc || libxl_bitmap_is_full(cpumap)) + goto out; + + /* If not setting "all", let's figure out what happened. 
*/ + rc = libxl_cpu_bitmap_alloc(ctx, &scpumap, 0); + if (rc) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating scpumap"); + goto out; + } + /* Retrieve the soft affinity to check how it combines with the new hard */ + rc = libxl__get_vcpuaffinity(ctx, domid, vcpuid, &scpumap, + XEN_VCPUAFFINITY_SOFT); + if (rc) + goto out; + + /* + * If the new hard affinity breaks the current soft affinity or, even + * worse, if it makes the intersection of hard and soft affinity empty, + * inform the user about that. Just avoid bothering him in case soft + * affinity is "all", as that means something like "I don't care much + * about it anyway!" + */ + if (!libxl_bitmap_is_full(&scpumap) && + !libxl_bitmap_equal(&scpumap, &ecpumap)) + LIBXL__LOG(ctx, LIBXL__LOG_WARNING, + "Soft affinity for vcpu %d now contains unreachable cpus", + vcpuid); + if (libxl_bitmap_is_empty(&ecpumap)) + LIBXL__LOG(ctx, LIBXL__LOG_WARNING, + "No reachable cpu in vcpu %d soft affinity. " + "Only hard affinity will be considered for scheduling", + vcpuid); + + out: + libxl_bitmap_dispose(&scpumap); + libxl_bitmap_dispose(&ecpumap); + return rc; +} + +int libxl_set_vcpuaffinity_soft(libxl_ctx *ctx, uint32_t domid, + uint32_t vcpuid, libxl_bitmap *cpumap) +{ + libxl_bitmap ecpumap; + int rc; + + libxl_bitmap_init(&ecpumap); + + rc = libxl_cpu_bitmap_alloc(ctx, &ecpumap, 0); + if (rc) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating ecpumap"); + goto out; + } + + /* + * If error, or if setting the soft affinity to "all", we can just + * leave without much other checking, as a full mask already means + * something like "I don't care much about it!". 
+ */ + rc = libxl__set_vcpuaffinity(ctx, domid, vcpuid, cpumap, + XEN_VCPUAFFINITY_SOFT, &ecpumap); + + if (rc || libxl_bitmap_is_full(cpumap)) + goto out; + + /* + * Check if the soft affinity we just set is something that can actually + * be used by the scheduler or, because of interactions with hard affinity + * and cpupools, that won't be entirely possible. + */ + if (!libxl_bitmap_equal(cpumap, &ecpumap)) + LIBXL__LOG(ctx, LIBXL__LOG_WARNING, + "Soft affinity for vcpu %d contains unreachable cpus", + vcpuid); + if (libxl_bitmap_is_empty(&ecpumap)) + LIBXL__LOG(ctx, LIBXL__LOG_WARNING, + "No reachable cpu in vcpu %d soft affinity. " + "Only hard affinity will be considered for scheduling", + vcpuid); + + out: + libxl_bitmap_dispose(&ecpumap); + return rc; } int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid, @@ -4241,16 +4399,38 @@ int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid, { int i, rc = 0; - for (i = 0; i < max_vcpus; i++) { - if (libxl_set_vcpuaffinity(ctx, domid, i, cpumap)) { - LIBXL__LOG(ctx, LIBXL__LOG_WARNING, - "failed to set affinity for %d", i); - rc = ERROR_FAIL; - } - } + for (i = 0; i < max_vcpus; i++) + rc = libxl_set_vcpuaffinity(ctx, domid, i, cpumap); + return rc; } +int libxl_set_vcpuaffinity_all_soft(libxl_ctx *ctx, uint32_t domid, + unsigned int max_vcpus, + libxl_bitmap *cpumap) +{ + int i, rc = 0; + + for (i = 0; i < max_vcpus; i++) + rc = libxl_set_vcpuaffinity_soft(ctx, domid, i, cpumap); + + return rc; +} + +int libxl_get_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, + uint32_t vcpuid, libxl_bitmap *cpumap) +{ + return libxl__get_vcpuaffinity(ctx, domid, vcpuid, cpumap, + XEN_VCPUAFFINITY_HARD); +} + +int libxl_get_vcpuaffinity_soft(libxl_ctx *ctx, uint32_t domid, + uint32_t vcpuid, libxl_bitmap *cpumap) +{ + return libxl__get_vcpuaffinity(ctx, domid, vcpuid, cpumap, + XEN_VCPUAFFINITY_SOFT); +} + int libxl_domain_set_nodeaffinity(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *nodemap) { diff --git 
a/tools/libxl/libxl.h b/tools/libxl/libxl.h index c7dceda..5020e0d 100644 --- a/tools/libxl/libxl.h +++ b/tools/libxl/libxl.h @@ -82,6 +82,20 @@ #define LIBXL_HAVE_DOMAIN_NODEAFFINITY 1 /* + * LIBXL_HAVE_VCPUINFO_SOFTAFFINITY indicates that a 'cpumap_soft' + * field (of libxl_bitmap type) is present in libxl_vcpuinfo, + * containing the soft affinity for the vcpu. + */ +#define LIBXL_HAVE_VCPUINFO_SOFTAFFINITY 1 + +/* + * LIBXL_HAVE_BUILDINFO_SOFTAFFINITY indicates that a 'cpumap_soft' + * field (of libxl_bitmap type) is present in libxl_domain_build_info, + * containing the soft affinity for the vcpu. + */ +#define LIBXL_HAVE_BUILDINFO_SOFTAFFINITY 1 + +/* * LIBXL_HAVE_BUILDINFO_HVM_VENDOR_DEVICE indicates that the * libxl_vendor_device field is present in the hvm sections of * libxl_domain_build_info. This field tells libxl which @@ -971,8 +985,17 @@ int libxl_userdata_retrieve(libxl_ctx *ctx, uint32_t domid, int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo); int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid, libxl_bitmap *cpumap); +int libxl_set_vcpuaffinity_soft(libxl_ctx *ctx, uint32_t domid, + uint32_t vcpuid, libxl_bitmap *cpumap); int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid, unsigned int max_vcpus, libxl_bitmap *cpumap); +int libxl_set_vcpuaffinity_all_soft(libxl_ctx *ctx, uint32_t domid, + unsigned int max_vcpus, + libxl_bitmap *cpumap); +int libxl_get_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, + uint32_t vcpuid, libxl_bitmap *cpumap); +int libxl_get_vcpuaffinity_soft(libxl_ctx *ctx, uint32_t domid, + uint32_t vcpuid, libxl_bitmap *cpumap); int libxl_domain_set_nodeaffinity(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *nodemap); int libxl_domain_get_nodeaffinity(libxl_ctx *ctx, uint32_t domid, diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c index 5e9cdcc..c314bec 100644 --- a/tools/libxl/libxl_create.c +++ b/tools/libxl/libxl_create.c @@ -192,6 +192,12 @@ int 
libxl__domain_build_info_setdefault(libxl__gc *gc, libxl_bitmap_set_any(&b_info->cpumap); } + if (!b_info->cpumap_soft.size) { + if (libxl_cpu_bitmap_alloc(CTX, &b_info->cpumap_soft, 0)) + return ERROR_FAIL; + libxl_bitmap_set_any(&b_info->cpumap_soft); + } + libxl_defbool_setdefault(&b_info->numa_placement, true); if (!b_info->nodemap.size) { diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl index de5bac3..4001761 100644 --- a/tools/libxl/libxl_types.idl +++ b/tools/libxl/libxl_types.idl @@ -297,6 +297,7 @@ libxl_domain_build_info = Struct("domain_build_info",[ ("max_vcpus", integer), ("avail_vcpus", libxl_bitmap), ("cpumap", libxl_bitmap), + ("cpumap_soft", libxl_bitmap), ("nodemap", libxl_bitmap), ("numa_placement", libxl_defbool), ("tsc_mode", libxl_tsc_mode), @@ -509,7 +510,8 @@ libxl_vcpuinfo = Struct("vcpuinfo", [ ("blocked", bool), ("running", bool), ("vcpu_time", uint64), # total vcpu time ran (ns) - ("cpumap", libxl_bitmap), # current cpu's affinities + ("cpumap", libxl_bitmap), # current hard cpu affinity + ("cpumap_soft", libxl_bitmap), # current soft cpu affinity ], dir=DIR_OUT) libxl_physinfo = Struct("physinfo", [ diff --git a/tools/libxl/libxl_utils.h b/tools/libxl/libxl_utils.h index 7b84e6a..fef83ca 100644 --- a/tools/libxl/libxl_utils.h +++ b/tools/libxl/libxl_utils.h @@ -98,6 +98,19 @@ static inline int libxl_bitmap_cpu_valid(libxl_bitmap *bitmap, int bit) #define libxl_for_each_set_bit(v, m) for (v = 0; v < (m).size * 8; v++) \ if (libxl_bitmap_test(&(m), v)) +static inline int libxl_bitmap_equal(const libxl_bitmap *ba, + const libxl_bitmap *bb) +{ + int i; + + libxl_for_each_bit(i, *ba) { + if (libxl_bitmap_test(ba, i) != libxl_bitmap_test(bb, i)) + return 0; + } + + return 1; +} + static inline int libxl_cpu_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *cpumap, int max_cpus) { _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |