[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH V5 03/32] xl / libxl: push VCPU affinity pinning down to libxl
This patch introduces a key value list called "vcpu_affinity" in libxl IDL to preserve VCPU to PCPU mapping. This is necessary for libxl to preserve all information to construct a domain. Also define LIBXL_HAVE_VCPU_AFFINITY_KEY_VALUE_LIST in libxl.h to mark the change in API. Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx> Cc: Dario Faggioli <dario.faggioli@xxxxxxxxxx> --- tools/libxl/libxl.h | 12 +++++++ tools/libxl/libxl_dom.c | 48 ++++++++++++++++++++++++++++ tools/libxl/libxl_types.idl | 1 + tools/libxl/xl_cmdimpl.c | 74 ++++++++++++++++--------------------------- 4 files changed, 89 insertions(+), 46 deletions(-) diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h index 2b06094..a5a5155 100644 --- a/tools/libxl/libxl.h +++ b/tools/libxl/libxl.h @@ -302,6 +302,18 @@ #endif /* + * LIBXL_HAVE_VCPU_AFFINITY_KEY_VALUE_LIST + * + * If this is defined, then libxl_domain_build_info structure will + * contain vcpu_affinity, a libxl_key_value_list type that contains + * the necessary information to pin a VCPU to a PCPU. Libxl will try + * to pin VCPUs to PCPUs according to this list. + * + * The key is the ID of VCPU and the value is the ID of PCPU. + */ +#define LIBXL_HAVE_VCPU_AFFINITY_KEY_VALUE_LIST 1 + +/* * LIBXL_HAVE_BUILDINFO_USBDEVICE_LIST * * If this is defined, then the libxl_domain_build_info structure will diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c index 661999c..b818815 100644 --- a/tools/libxl/libxl_dom.c +++ b/tools/libxl/libxl_dom.c @@ -263,6 +263,54 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid, libxl_domain_set_nodeaffinity(ctx, domid, &info->nodemap); libxl_set_vcpuaffinity_all(ctx, domid, info->max_vcpus, &info->cpumap); + /* If we have vcpu affinity list, pin vcpu to pcpu. 
*/ + if (d_config->b_info.vcpu_affinity) { + int i; + libxl_bitmap vcpu_cpumap; + int *vcpu_to_pcpu, sz = sizeof(int) * d_config->b_info.max_vcpus; + + vcpu_to_pcpu = libxl__zalloc(gc, sz); + memset(vcpu_to_pcpu, -1, sz); + + for (i = 0; i < d_config->b_info.max_vcpus; i++) { + libxl_key_value_list kvl = d_config->b_info.vcpu_affinity; + const char *key, *val; + int k, v; + + key = kvl[i * 2]; + if (!key) + break; + val = kvl[i * 2 + 1]; + + k = atoi(key); + v = atoi(val); + vcpu_to_pcpu[k] = v; + } + + rc = libxl_cpu_bitmap_alloc(ctx, &vcpu_cpumap, 0); + if (rc) { + libxl_bitmap_dispose(&vcpu_cpumap); + return ERROR_FAIL; + } + + for (i = 0; i < d_config->b_info.max_vcpus; i++) { + if (vcpu_to_pcpu[i] != -1) { + libxl_bitmap_set_none(&vcpu_cpumap); + libxl_bitmap_set(&vcpu_cpumap, vcpu_to_pcpu[i]); + } else { + libxl_bitmap_set_any(&vcpu_cpumap); + } + if (libxl_set_vcpuaffinity(ctx, domid, i, &vcpu_cpumap)) { + LIBXL__LOG(ctx, LIBXL__LOG_ERROR, + "setting affinity failed on vcpu `%d'.", i); + libxl_bitmap_dispose(&vcpu_cpumap); + return ERROR_FAIL; + } + } + + libxl_bitmap_dispose(&vcpu_cpumap); + } + if (xc_domain_setmaxmem(ctx->xch, domid, info->target_memkb + LIBXL_MAXMEM_CONSTANT) < 0) { LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "Couldn't set max memory"); diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl index 0dfafe7..7b0901c 100644 --- a/tools/libxl/libxl_types.idl +++ b/tools/libxl/libxl_types.idl @@ -305,6 +305,7 @@ libxl_domain_build_info = Struct("domain_build_info",[ ("avail_vcpus", libxl_bitmap), ("cpumap", libxl_bitmap), ("nodemap", libxl_bitmap), + ("vcpu_affinity", libxl_key_value_list), ("numa_placement", libxl_defbool), ("tsc_mode", libxl_tsc_mode), ("max_memkb", MemKB), diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c index a1cb5b8..3200d40 100644 --- a/tools/libxl/xl_cmdimpl.c +++ b/tools/libxl/xl_cmdimpl.c @@ -88,9 +88,6 @@ xlchild children[child_max]; static const char *common_domname; static int fd_lock = 
-1; -/* Stash for specific vcpu to pcpu mappping */ -static int *vcpu_to_pcpu; - static const char savefileheader_magic[32]= "Xen saved domain, xl format\n \0 \r"; @@ -804,26 +801,19 @@ static void parse_config_data(const char *config_source, if (!xlu_cfg_get_list (config, "cpus", &cpus, 0, 1)) { int n_cpus = 0; + libxl_key_value_list kvl = NULL; + int len = 0; if (libxl_cpu_bitmap_alloc(ctx, &b_info->cpumap, 0)) { fprintf(stderr, "Unable to allocate cpumap\n"); exit(1); } - /* Prepare the array for single vcpu to pcpu mappings */ - vcpu_to_pcpu = xmalloc(sizeof(int) * b_info->max_vcpus); - memset(vcpu_to_pcpu, -1, sizeof(int) * b_info->max_vcpus); - - /* - * Idea here is to let libxl think all the domain's vcpus - * have cpu affinity with all the pcpus on the list. - * It is then us, here in xl, that matches each single vcpu - * to its pcpu (and that's why we need to stash such info in - * the vcpu_to_pcpu array now) after the domain has been created. - * Doing it like this saves the burden of passing to libxl - * some big array hosting the single mappings. Also, using - * the cpumap derived from the list ensures memory is being - * allocated on the proper nodes anyway. + /* Construct key value list for vcpu affinity. The key is vcpu + * id, value is pcpu id. 
+ * + * For example, for a list of ['2', '3'], we generate + * kvl = [ '0':'2', '1':'3' ] */ libxl_bitmap_set_none(&b_info->cpumap); while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) { @@ -833,11 +823,30 @@ static void parse_config_data(const char *config_source, exit(1); } libxl_bitmap_set(&b_info->cpumap, i); - if (n_cpus < b_info->max_vcpus) - vcpu_to_pcpu[n_cpus] = i; + if (n_cpus < b_info->max_vcpus) { + kvl = xrealloc(kvl, sizeof(char *) * (len * 2 + 2)); + /* key */ + if (asprintf(&kvl[len*2], "%d", n_cpus) == -1) { + LOG("failed to allocate memory for vcpu affinity list: key"); + exit(1); + } + /* value */ + kvl[len*2+1] = strdup(buf); + if (!kvl[len*2+1]) { + LOG("failed to allocate memory for vcpu affinity list: value"); + exit(1); + } + len++; + } n_cpus++; } + /* One extra slot for sentinel */ + kvl = xrealloc(kvl, sizeof(char *) * (len * 2 + 1)); + kvl[len * 2] = NULL; + + b_info->vcpu_affinity = kvl; + /* We have a cpumap, disable automatic placement */ libxl_defbool_set(&b_info->numa_placement, false); } @@ -2188,33 +2197,6 @@ start: if ( ret ) goto error_out; - /* If single vcpu to pcpu mapping was requested, honour it */ - if (vcpu_to_pcpu) { - libxl_bitmap vcpu_cpumap; - - ret = libxl_cpu_bitmap_alloc(ctx, &vcpu_cpumap, 0); - if (ret) - goto error_out; - for (i = 0; i < d_config.b_info.max_vcpus; i++) { - - if (vcpu_to_pcpu[i] != -1) { - libxl_bitmap_set_none(&vcpu_cpumap); - libxl_bitmap_set(&vcpu_cpumap, vcpu_to_pcpu[i]); - } else { - libxl_bitmap_set_any(&vcpu_cpumap); - } - if (libxl_set_vcpuaffinity(ctx, domid, i, &vcpu_cpumap)) { - fprintf(stderr, "setting affinity failed on vcpu `%d'.\n", i); - libxl_bitmap_dispose(&vcpu_cpumap); - free(vcpu_to_pcpu); - ret = ERROR_FAIL; - goto error_out; - } - } - libxl_bitmap_dispose(&vcpu_cpumap); - free(vcpu_to_pcpu); vcpu_to_pcpu = NULL; - } - ret = libxl_userdata_store(ctx, domid, "xl", config_data, config_len); if (ret) { -- 1.7.10.4 _______________________________________________ Xen-devel 
mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |