[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v8 10/13] libxl/xl: deprecate the build_info->cpumap field
as, thanks to previous change ("libxl/xl: push VCPU affinity pinning down to libxl"), we now have an array of libxl_bitmap-s that can be used to transfer to libxl the vcpu (hard) affinity of each vcpu of the domain. Therefore, the cpumap field is no longer necessary: if we want all the vcpus to have the same affinity, we just put it in all the elements of the array. This makes the libxl code simpler and easier to understand and maintain (only one place where to read the affinity), and does not complicate things much on the xl side, that is why we go for it. Another benefit is that, by unifying the parsing (at the xl level) and the place where the information is consumed and the affinity are actually set (at the libxl level), it becomes possible to do things like: cpus = ["3-4", "2-6"] meaning we want vcpu 0 to be pinned to pcpu 3,4 and vcpu 1 to be pinned to pcpu 2,3,4,5,6. Before this change, in fact, the list variant (["xx", "yy"]) supported only single values. BEWARE that, although still being there, for backward compatibility reasons, the cpumap field in build_info is no longer used anywhere in libxl. Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx> --- docs/man/xl.cfg.pod.5 | 8 +++--- tools/libxl/libxl_create.c | 6 ---- tools/libxl/libxl_dom.c | 4 +-- tools/libxl/libxl_types.idl | 6 ++++ tools/libxl/xl_cmdimpl.c | 61 +++++++++++++++++-------------------------- 5 files changed, 34 insertions(+), 51 deletions(-) diff --git a/docs/man/xl.cfg.pod.5 b/docs/man/xl.cfg.pod.5 index c087cbc..af48622 100644 --- a/docs/man/xl.cfg.pod.5 +++ b/docs/man/xl.cfg.pod.5 @@ -143,11 +143,11 @@ Combining this with "all" is also possible, meaning "all,^nodes:1" results in all the vcpus of the guest running on all the cpus on the host, except for the cpus belonging to the host NUMA node 1. -=item ["2", "3"] (or [2, 3]) +=item ["2", "3-8,^5"] -To ask for specific vcpu mapping. 
That means (in this example), vcpu #0 -of the guest will run on cpu #2 of the host and vcpu #1 of the guest will -run on cpu #3 of the host. +To ask for specific vcpu mapping. That means (in this example), vcpu 0 +of the guest will run on cpu 2 of the host and vcpu 1 of the guest will +run on cpus 3,4,6,7,8 of the host. =back diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c index d015cf4..443fe7d 100644 --- a/tools/libxl/libxl_create.c +++ b/tools/libxl/libxl_create.c @@ -187,12 +187,6 @@ int libxl__domain_build_info_setdefault(libxl__gc *gc, } else if (b_info->avail_vcpus.size > HVM_MAX_VCPUS) return ERROR_FAIL; - if (!b_info->cpumap.size) { - if (libxl_cpu_bitmap_alloc(CTX, &b_info->cpumap, 0)) - return ERROR_FAIL; - libxl_bitmap_set_any(&b_info->cpumap); - } - libxl_defbool_setdefault(&b_info->numa_placement, true); if (!b_info->nodemap.size) { diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c index 1767659..0b00470 100644 --- a/tools/libxl/libxl_dom.c +++ b/tools/libxl/libxl_dom.c @@ -250,7 +250,7 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid, * whatever that turns out to be. 
*/ if (libxl_defbool_val(info->numa_placement)) { - if (!libxl_bitmap_is_full(&info->cpumap)) { + if (d_config->b_info.num_vcpu_hard_affinity) { LOG(ERROR, "Can run NUMA placement only if no vcpu " "affinity is specified"); return ERROR_INVAL; @@ -261,8 +261,6 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid, return rc; } libxl_domain_set_nodeaffinity(ctx, domid, &info->nodemap); - libxl_set_vcpuaffinity_all(ctx, domid, info->max_vcpus, - &info->cpumap, NULL); /* If we have the vcpu hard affinity list, honour it */ if (d_config->b_info.num_vcpu_hard_affinity) { diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl index 05978d7..cd5c0d4 100644 --- a/tools/libxl/libxl_types.idl +++ b/tools/libxl/libxl_types.idl @@ -297,7 +297,11 @@ libxl_domain_sched_params = Struct("domain_sched_params",[ libxl_domain_build_info = Struct("domain_build_info",[ ("max_vcpus", integer), ("avail_vcpus", libxl_bitmap), - ("cpumap", libxl_bitmap), + ("cpumap", libxl_bitmap), # DEPRECATED! + # The cpumap field above has been deprecated by the introduction of the + # vcpu_hard_affinity array. It is no longer used anywhere in libxl code, + # so it is better to avoid setting it or, in general, using it at all. + # Doing so is harmless, but won't produce any actual effect on the domain. 
("nodemap", libxl_bitmap), ("vcpu_hard_affinity", Array(libxl_bitmap, "num_vcpu_hard_affinity")), ("numa_placement", libxl_defbool), diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c index ac603c8..d9e235e 100644 --- a/tools/libxl/xl_cmdimpl.c +++ b/tools/libxl/xl_cmdimpl.c @@ -656,14 +656,16 @@ static int update_cpumap_range(const char *str, libxl_bitmap *cpumap) static int vcpupin_parse(const char *cpu, libxl_bitmap *cpumap) { char *ptr, *saveptr = NULL; + char *buf = strdup(cpu); int rc = 0; - for (ptr = strtok_r(cpu, ",", &saveptr); ptr; + for (ptr = strtok_r(buf, ",", &saveptr); ptr; ptr = strtok_r(NULL, ",", &saveptr)) { rc = update_cpumap_range(ptr, cpumap); if (rc) break; } + free(buf); return rc; } @@ -821,14 +823,11 @@ static void parse_config_data(const char *config_source, if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0)) b_info->max_vcpus = l; - if (!xlu_cfg_get_list (config, "cpus", &cpus, 0, 1)) { + buf = NULL; + if (!xlu_cfg_get_list (config, "cpus", &cpus, 0, 1) || + !xlu_cfg_get_string (config, "cpus", &buf, 0)) { b_info->num_vcpu_hard_affinity = b_info->max_vcpus; - int n_cpus = 0; - - if (libxl_cpu_bitmap_alloc(ctx, &b_info->cpumap, 0)) { - fprintf(stderr, "Unable to allocate cpumap\n"); - exit(1); - } + const char *buf2; b_info->vcpu_hard_affinity = xmalloc(b_info->num_vcpu_hard_affinity * sizeof(libxl_bitmap)); @@ -840,42 +839,30 @@ static void parse_config_data(const char *config_source, fprintf(stderr, "Unable to allocate cpumap for vcpu %d\n", i); exit(1); } - libxl_bitmap_set_any(&b_info->vcpu_hard_affinity[i]); + libxl_bitmap_set_none(&b_info->vcpu_hard_affinity[i]); } - libxl_bitmap_set_none(&b_info->cpumap); - while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) { - i = atoi(buf); - if (!libxl_bitmap_cpu_valid(&b_info->cpumap, i)) { - fprintf(stderr, "cpu %d illegal\n", i); - exit(1); - } - libxl_bitmap_set(&b_info->cpumap, i); - if (n_cpus < b_info->max_vcpus) { - 
libxl_bitmap_set_none(&b_info->vcpu_hard_affinity[n_cpus]); - libxl_bitmap_set(&b_info->vcpu_hard_affinity[n_cpus], i); - } - n_cpus++; + /* + * When buf is !NULL, we've been passed a string, and what we do + * is parse it and put the result in all the entries of the vcpu + * affinity array. If it's NULL, what we have is a list, and what + * we put in each entry of the vcpu affinity array is the result of + * the parsing of each element of the list (if there are more + * vcpus than elements, the missing ones have their affinity masks + * completely full). + */ + for (i = 0; i < b_info->num_vcpu_hard_affinity; i++) { + if (buf || ((buf2 = xlu_cfg_get_listitem(cpus, i)) != NULL)) { + if (vcpupin_parse(buf ? buf : buf2, + &b_info->vcpu_hard_affinity[i])) + exit(1); + } else + libxl_bitmap_set_any(&b_info->vcpu_hard_affinity[i]); } /* We have a list of cpumaps, disable automatic placement */ libxl_defbool_set(&b_info->numa_placement, false); } - else if (!xlu_cfg_get_string (config, "cpus", &buf, 0)) { - char *buf2 = strdup(buf); - - if (libxl_cpu_bitmap_alloc(ctx, &b_info->cpumap, 0)) { - fprintf(stderr, "Unable to allocate cpumap\n"); - exit(1); - } - - libxl_bitmap_set_none(&b_info->cpumap); - if (vcpupin_parse(buf2, &b_info->cpumap)) - exit(1); - free(buf2); - - libxl_defbool_set(&b_info->numa_placement, false); - } if (!xlu_cfg_get_long (config, "memory", &l, 0)) { b_info->max_memkb = l * 1024; _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |