[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v3 11/14] libxl: get and set soft affinity
Make space for two new cpumap-s, one in vcpu_info (for getting soft affinity) and build_info (for setting it). Provide two new API calls: * libxl_set_vcpuaffinity2, taking a cpumap and setting either hard, soft or both affinity to it, depending on 'flags'; * libxl_set_vcpuaffinity3, taking two cpumaps, one for hard and one for soft affinity. The behavior of the existing libxl_set_vcpuaffinity is left unchanged, i.e., it only sets hard affinity. Getting soft affinity happens indirectly, via `xl vcpu-list' (as it is already for hard affinity). The new calls include logic to check whether the affinity which will be used by Xen to schedule the vCPU(s) does actually match with the cpumap provided. In fact, we want to allow every possible combination of hard and soft affinities to be set, but we warn the user upon particularly weird combinations (e.g., hard and soft being disjoint sets of pCPUs). Also, this is the first change breaking the libxl ABI, so it bumps the MAJOR. Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx> --- Changes from v2: * interface completely redesigned, as discussed during review. --- tools/libxl/Makefile | 2 - tools/libxl/libxl.c | 131 +++++++++++++++++++++++++++++++++++++++++++ tools/libxl/libxl.h | 30 ++++++++++ tools/libxl/libxl_create.c | 6 ++ tools/libxl/libxl_types.idl | 4 + tools/libxl/libxl_utils.h | 15 +++++ 6 files changed, 186 insertions(+), 2 deletions(-) diff --git a/tools/libxl/Makefile b/tools/libxl/Makefile index cf214bb..cba32d5 100644 --- a/tools/libxl/Makefile +++ b/tools/libxl/Makefile @@ -5,7 +5,7 @@ XEN_ROOT = $(CURDIR)/../.. 
include $(XEN_ROOT)/tools/Rules.mk -MAJOR = 4.3 +MAJOR = 4.4 MINOR = 0 XLUMAJOR = 4.3 diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c index d0db3f0..1122360 100644 --- a/tools/libxl/libxl.c +++ b/tools/libxl/libxl.c @@ -4204,6 +4204,8 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid, for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) { if (libxl_cpu_bitmap_alloc(ctx, &ptr->cpumap, 0)) return NULL; + if (libxl_cpu_bitmap_alloc(ctx, &ptr->cpumap_soft, 0)) + return NULL; if (xc_vcpu_getinfo(ctx->xch, domid, *nb_vcpu, &vcpuinfo) == -1) { LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu info"); return NULL; @@ -4214,6 +4216,12 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid, LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity"); return NULL; } + if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu, + XEN_VCPUAFFINITY_SOFT, + ptr->cpumap_soft.map) == -1) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity"); + return NULL; + } ptr->vcpuid = *nb_vcpu; ptr->cpu = vcpuinfo.cpu; ptr->online = !!vcpuinfo.online; @@ -4250,6 +4258,129 @@ int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid, return rc; } +int libxl_set_vcpuaffinity2(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid, + const libxl_bitmap *cpumap, int flags) +{ + libxl_cputopology *topology; + libxl_bitmap ecpumap; + int nr_cpus = 0, rc; + + topology = libxl_get_cpu_topology(ctx, &nr_cpus); + if (!topology) { + LIBXL__LOG(ctx, LIBXL__LOG_ERROR, "failed to retrieve CPU topology"); + return ERROR_FAIL; + } + libxl_cputopology_list_free(topology, nr_cpus); + + rc = libxl_cpu_bitmap_alloc(ctx, &ecpumap, 0); + if (rc) + return rc; + + if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map, + flags, ecpumap.map)) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu affinity"); + rc = ERROR_FAIL; + goto out; + } + + if (!libxl_bitmap_equal(cpumap, &ecpumap, nr_cpus)) + LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, + "New 
affinity for vcpu %d contains unreachable cpus", + vcpuid); + if (libxl_bitmap_is_empty(&ecpumap)) + LIBXL__LOG(ctx, LIBXL__LOG_WARNING, + "New affinity for vcpu %d has only unreachable cpus. " + "Only hard affinity will be considered for scheduling", + vcpuid); + + rc = 0; + out: + libxl_bitmap_dispose(&ecpumap); + return rc; +} + +int libxl_set_vcpuaffinity_all2(libxl_ctx *ctx, uint32_t domid, + unsigned int max_vcpus, + const libxl_bitmap *cpumap, int flags) +{ + int i, rc = 0; + + for (i = 0; i < max_vcpus; i++) { + if (libxl_set_vcpuaffinity2(ctx, domid, i, cpumap, flags)) { + LIBXL__LOG(ctx, LIBXL__LOG_WARNING, + "failed to set affinity for %d", i); + rc = ERROR_FAIL; + } + } + return rc; +} + +int libxl_set_vcpuaffinity3(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid, + const libxl_bitmap *cpumap_hard, + const libxl_bitmap *cpumap_soft) +{ + libxl_cputopology *topology; + libxl_bitmap ecpumap; + int nr_cpus = 0, rc; + + topology = libxl_get_cpu_topology(ctx, &nr_cpus); + if (!topology) { + LIBXL__LOG(ctx, LIBXL__LOG_ERROR, "failed to retrieve CPU topology"); + return ERROR_FAIL; + } + libxl_cputopology_list_free(topology, nr_cpus); + + rc = libxl_cpu_bitmap_alloc(ctx, &ecpumap, 0); + if (rc) + return rc; + + if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap_hard->map, + XEN_VCPUAFFINITY_HARD, NULL)) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu hard affinity"); + rc = ERROR_FAIL; + goto out; + } + + if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap_soft->map, + XEN_VCPUAFFINITY_SOFT, ecpumap.map)) { + LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu soft affinity"); + rc = ERROR_FAIL; + goto out; + } + + if (!libxl_bitmap_equal(cpumap_soft, &ecpumap, nr_cpus)) + LIBXL__LOG(ctx, LIBXL__LOG_DEBUG, + "New soft affinity for vcpu %d contains unreachable cpus", + vcpuid); + if (libxl_bitmap_is_empty(&ecpumap)) + LIBXL__LOG(ctx, LIBXL__LOG_WARNING, + "New soft affinity for vcpu %d has only unreachable cpus. 
" + "Only hard affinity will be considered for scheduling", + vcpuid); + + rc = 0; + out: + libxl_bitmap_dispose(&ecpumap); + return 0; +} + +int libxl_set_vcpuaffinity_all3(libxl_ctx *ctx, uint32_t domid, + unsigned int max_vcpus, + const libxl_bitmap *cpumap_hard, + const libxl_bitmap *cpumap_soft) +{ + int i, rc = 0; + + for (i = 0; i < max_vcpus; i++) { + if (libxl_set_vcpuaffinity3(ctx, domid, i, cpumap_hard, cpumap_soft)) { + LIBXL__LOG(ctx, LIBXL__LOG_WARNING, + "failed to set affinity for %d", i); + rc = ERROR_FAIL; + } + } + return rc; +} + int libxl_domain_set_nodeaffinity(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *nodemap) { diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h index c7dceda..504c57b 100644 --- a/tools/libxl/libxl.h +++ b/tools/libxl/libxl.h @@ -82,6 +82,20 @@ #define LIBXL_HAVE_DOMAIN_NODEAFFINITY 1 /* + * LIBXL_HAVE_VCPUINFO_SOFTAFFINITY indicates that a 'cpumap_soft' + * field (of libxl_bitmap type) is present in libxl_vcpuinfo, + * containing the soft affinity for the vcpu. + */ +#define LIBXL_HAVE_VCPUINFO_SOFTAFFINITY 1 + +/* + * LIBXL_HAVE_BUILDINFO_SOFTAFFINITY indicates that a 'cpumap_soft' + * field (of libxl_bitmap type) is present in libxl_domain_build_info, + * containing the soft affinity for the vcpu. + */ +#define LIBXL_HAVE_BUILDINFO_SOFTAFFINITY 1 + +/* * LIBXL_HAVE_BUILDINFO_HVM_VENDOR_DEVICE indicates that the * libxl_vendor_device field is present in the hvm sections of * libxl_domain_build_info. 
This field tells libxl which @@ -973,6 +987,22 @@ int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid, libxl_bitmap *cpumap); int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid, unsigned int max_vcpus, libxl_bitmap *cpumap); +int libxl_set_vcpuaffinity2(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid, + const libxl_bitmap *cpumap, int flags); +int libxl_set_vcpuaffinity_all2(libxl_ctx *ctx, uint32_t domid, + unsigned int max_vcpus, + const libxl_bitmap *cpumap, int flags); +int libxl_set_vcpuaffinity3(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid, + const libxl_bitmap *cpumap_hard, + const libxl_bitmap *cpumap_soft); +int libxl_set_vcpuaffinity_all3(libxl_ctx *ctx, uint32_t domid, + unsigned int max_vcpus, + const libxl_bitmap *cpumap_hard, + const libxl_bitmap *cpumap_soft); +/* Flags, consistent with domctl.h */ +#define LIBXL_VCPUAFFINITY_HARD 1 +#define LIBXL_VCPUAFFINITY_SOFT 2 + int libxl_domain_set_nodeaffinity(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *nodemap); int libxl_domain_get_nodeaffinity(libxl_ctx *ctx, uint32_t domid, diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c index 5e9cdcc..c314bec 100644 --- a/tools/libxl/libxl_create.c +++ b/tools/libxl/libxl_create.c @@ -192,6 +192,12 @@ int libxl__domain_build_info_setdefault(libxl__gc *gc, libxl_bitmap_set_any(&b_info->cpumap); } + if (!b_info->cpumap_soft.size) { + if (libxl_cpu_bitmap_alloc(CTX, &b_info->cpumap_soft, 0)) + return ERROR_FAIL; + libxl_bitmap_set_any(&b_info->cpumap_soft); + } + libxl_defbool_setdefault(&b_info->numa_placement, true); if (!b_info->nodemap.size) { diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl index de5bac3..4001761 100644 --- a/tools/libxl/libxl_types.idl +++ b/tools/libxl/libxl_types.idl @@ -297,6 +297,7 @@ libxl_domain_build_info = Struct("domain_build_info",[ ("max_vcpus", integer), ("avail_vcpus", libxl_bitmap), ("cpumap", libxl_bitmap), + ("cpumap_soft", libxl_bitmap), ("nodemap", 
libxl_bitmap), ("numa_placement", libxl_defbool), ("tsc_mode", libxl_tsc_mode), @@ -509,7 +510,8 @@ libxl_vcpuinfo = Struct("vcpuinfo", [ ("blocked", bool), ("running", bool), ("vcpu_time", uint64), # total vcpu time ran (ns) - ("cpumap", libxl_bitmap), # current cpu's affinities + ("cpumap", libxl_bitmap), # current hard cpu affinity + ("cpumap_soft", libxl_bitmap), # current soft cpu affinity ], dir=DIR_OUT) libxl_physinfo = Struct("physinfo", [ diff --git a/tools/libxl/libxl_utils.h b/tools/libxl/libxl_utils.h index b11cf28..fc3afee 100644 --- a/tools/libxl/libxl_utils.h +++ b/tools/libxl/libxl_utils.h @@ -98,6 +98,21 @@ static inline int libxl_bitmap_cpu_valid(libxl_bitmap *bitmap, int bit) #define libxl_for_each_set_bit(v, m) for (v = 0; v < (m).size * 8; v++) \ if (libxl_bitmap_test(&(m), v)) +static inline int libxl_bitmap_equal(const libxl_bitmap *ba, + const libxl_bitmap *bb, + int nr_bits) +{ + int i; + + /* Only check nr_bits (all bits if <= 0) */ + nr_bits = nr_bits <=0 ? ba->size * 8 : nr_bits; + for (i = 0; i < nr_bits; i++) { + if (libxl_bitmap_test(ba, i) != libxl_bitmap_test(bb, i)) + return 0; + } + return 1; +} + int libxl_cpu_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *cpumap, int max_cpus); _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |