[Xen-devel] [PATCH v2 4/7] xl: enable using ranges of pCPUs when manipulating cpupools
In fact, right now, the xl sub-commands 'cpupool-cpu-add' and
'cpupool-cpu-remove' only accept the specification of one pCPU to be
added to or removed from a cpupool. With this change, they can deal
with ranges, like "4-8", or "node:1,12-18,^14". The syntax is exactly
the same as the one supported by the 'vcpu-pin' subcommand, and
specifying just one pCPU still works, of course.

This makes things more flexible and more consistent, and also improves
error handling, as the pCPU range parsing routine already present in
xl is more reliable than just a call to atoi().

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Juergen Gross <JGross@xxxxxxxx>
---
Changes from v1:
 * reword the man page change, in line with what was requested during
   review.
---
 docs/man/xl.pod.1        | 25 +++++++++++--
 tools/libxl/xl_cmdimpl.c | 92 ++++++++++++++++++++--------------------------
 2 files changed, 60 insertions(+), 57 deletions(-)

diff --git a/docs/man/xl.pod.1 b/docs/man/xl.pod.1
index 5da9b15..16783c8 100644
--- a/docs/man/xl.pod.1
+++ b/docs/man/xl.pod.1
@@ -1147,13 +1147,30 @@ This is possible only if no domain is active in the cpu-pool.
 
 Renames a cpu-pool to I<newname>.
 
-=item B<cpupool-cpu-add> I<cpu-pool> I<cpu-nr|node:node-nr>
+=item B<cpupool-cpu-add> I<cpu-pool> I<cpus|node:nodes>
 
-Adds a cpu or all cpus of a numa node to a cpu-pool.
+Adds one or more CPUs or NUMA nodes to I<cpu-pool>. CPUs and NUMA
+nodes can be specified as single CPU/node IDs or as ranges.
 
-=item B<cpupool-cpu-remove> I<cpu-nr|node:node-nr>
+For example:
+
+ (a) xl cpupool-cpu-add mypool 4
+ (b) xl cpupool-cpu-add mypool 1,5,10-16,^13
+ (c) xl cpupool-cpu-add mypool node:0,nodes:2-3,^10-12,8
+
+means adding CPU 4 to mypool, in (a); adding CPUs 1,5,10,11,12,14,15
+and 16, in (b); and adding all the CPUs of NUMA nodes 0, 2 and 3,
+plus CPU 8, but keeping out CPUs 10,11,12, in (c).
+
+All the specified CPUs that can be added to the cpupool will be added
+to it. If some CPU can't (e.g., because they're already part of another
+cpupool), an error is reported about each one of them.
+
+=item B<cpupool-cpu-remove> I<cpus|node:nodes>
 
-Removes a cpu or all cpus of a numa node from a cpu-pool.
+Removes one or more CPUs or NUMA nodes from I<cpu-pool>. CPUs and NUMA
+nodes can be specified as single CPU/node IDs or as ranges, using the
+exact same syntax as in B<cpupool-cpu-add> above.
 
 =item B<cpupool-migrate> I<domain> I<cpu-pool>
 
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index 0481261..ba5b51e 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -760,7 +760,7 @@ static int update_cpumap_range(const char *str, libxl_bitmap *cpumap)
  * single cpus or as entire NUMA nodes) and turns it into the
  * corresponding libxl_bitmap (in cpumap).
  */
-static int vcpupin_parse(const char *cpu, libxl_bitmap *cpumap)
+static int cpurange_parse(const char *cpu, libxl_bitmap *cpumap)
 {
     char *ptr, *saveptr = NULL, *buf = strdup(cpu);
     int rc = 0;
@@ -872,7 +872,7 @@ static void parse_vcpu_affinity(libxl_domain_build_info *b_info,
             exit(1);
         }
 
-        if (vcpupin_parse(buf, &vcpu_affinity_array[j]))
+        if (cpurange_parse(buf, &vcpu_affinity_array[j]))
             exit(1);
 
         j++;
@@ -889,7 +889,7 @@ static void parse_vcpu_affinity(libxl_domain_build_info *b_info,
             exit(1);
         }
 
-        if (vcpupin_parse(buf, &vcpu_affinity_array[0]))
+        if (cpurange_parse(buf, &vcpu_affinity_array[0]))
             exit(1);
 
         for (i = 1; i < b_info->max_vcpus; i++) {
@@ -4964,7 +4964,7 @@ int main_vcpupin(int argc, char **argv)
      */
     if (!strcmp(hard_str, "-"))
         hard = NULL;
-    else if (vcpupin_parse(hard_str, hard))
+    else if (cpurange_parse(hard_str, hard))
         goto out;
     /*
      * Soft affinity is handled similarly. Only difference: we also want
@@ -4972,7 +4972,7 @@ int main_vcpupin(int argc, char **argv)
      */
     if (argc <= optind+3 || !strcmp(soft_str, "-"))
         soft = NULL;
-    else if (vcpupin_parse(soft_str, soft))
+    else if (cpurange_parse(soft_str, soft))
         goto out;
 
     if (dryrun_only) {
@@ -7310,44 +7310,37 @@ int main_cpupoolcpuadd(int argc, char **argv)
     int opt;
     const char *pool;
     uint32_t poolid;
-    int cpu;
-    int node;
-    int n;
+    libxl_bitmap cpumap;
+    int rc = 1;
 
     SWITCH_FOREACH_OPT(opt, "", NULL, "cpupool-cpu-add", 2) {
         /* No options */
     }
 
-    pool = argv[optind++];
-    node = -1;
-    cpu = -1;
-    if (strncmp(argv[optind], "node:", 5) == 0) {
-        node = atoi(argv[optind] + 5);
-    } else {
-        cpu = atoi(argv[optind]);
+    libxl_bitmap_init(&cpumap);
+    if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0)) {
+        fprintf(stderr, "Unable to allocate cpumap");
+        return 1;
     }
 
+    pool = argv[optind++];
+    if (cpurange_parse(argv[optind], &cpumap))
+        goto out;
+
     if (libxl_cpupool_qualifier_to_cpupoolid(ctx, pool, &poolid, NULL) ||
         !libxl_cpupoolid_is_valid(ctx, poolid)) {
         fprintf(stderr, "unknown cpupool \'%s\'\n", pool);
-        return -ERROR_FAIL;
-    }
-
-    if (cpu >= 0) {
-        return -libxl_cpupool_cpuadd(ctx, poolid, cpu);
+        goto out;
     }
 
-    if (libxl_cpupool_cpuadd_node(ctx, poolid, node, &n)) {
-        fprintf(stderr, "libxl_cpupool_cpuadd_node failed\n");
-        return -ERROR_FAIL;
-    }
+    if (libxl_cpupool_cpuadd_cpumap(ctx, poolid, &cpumap))
+        fprintf(stderr, "some cpus may not have been added to %s\n", pool);
 
-    if (n > 0) {
-        return 0;
-    }
+    rc = 0;
 
-    fprintf(stderr, "no free cpu found\n");
-    return -ERROR_FAIL;
+out:
+    libxl_bitmap_dispose(&cpumap);
+    return rc;
 }
 
 int main_cpupoolcpuremove(int argc, char **argv)
@@ -7355,44 +7348,37 @@ int main_cpupoolcpuremove(int argc, char **argv)
     int opt;
     const char *pool;
     uint32_t poolid;
-    int cpu;
-    int node;
-    int n;
+    libxl_bitmap cpumap;
+    int rc = 1;
+
+    libxl_bitmap_init(&cpumap);
+    if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0)) {
+        fprintf(stderr, "Unable to allocate cpumap");
+        return 1;
+    }
 
     SWITCH_FOREACH_OPT(opt, "", NULL, "cpupool-cpu-remove", 2) {
         /* No options */
     }
 
     pool = argv[optind++];
-    node = -1;
-    cpu = -1;
-    if (strncmp(argv[optind], "node:", 5) == 0) {
-        node = atoi(argv[optind] + 5);
-    } else {
-        cpu = atoi(argv[optind]);
-    }
+    if (cpurange_parse(argv[optind], &cpumap))
+        goto out;
 
     if (libxl_cpupool_qualifier_to_cpupoolid(ctx, pool, &poolid, NULL) ||
         !libxl_cpupoolid_is_valid(ctx, poolid)) {
         fprintf(stderr, "unknown cpupool \'%s\'\n", pool);
-        return -ERROR_FAIL;
-    }
-
-    if (cpu >= 0) {
-        return -libxl_cpupool_cpuremove(ctx, poolid, cpu);
+        goto out;
     }
 
-    if (libxl_cpupool_cpuremove_node(ctx, poolid, node, &n)) {
-        fprintf(stderr, "libxl_cpupool_cpuremove_node failed\n");
-        return -ERROR_FAIL;
-    }
+    if (libxl_cpupool_cpuremove_cpumap(ctx, poolid, &cpumap))
+        fprintf(stderr, "some cpus may not have been removed from %s\n", pool);
 
-    if (n == 0) {
-        fprintf(stderr, "no cpu of node found in cpupool\n");
-        return -ERROR_FAIL;
-    }
+    rc = 0;
 
-    return 0;
+out:
+    libxl_bitmap_dispose(&cpumap);
+    return rc;
 }
 
 int main_cpupoolmigrate(int argc, char **argv)
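
For illustration, here is a minimal standalone sketch of the kind of
range parsing the renamed cpurange_parse() performs. This is not the
xl code: parse_range, MAX_CPUS and the plain uint64_t bitmap are
assumptions made up for this example, and the real routine also
handles "node:"/"nodes:" prefixes (via update_cpumap_range()) and
operates on a libxl_bitmap rather than a fixed-width mask.

/*
 * Sketch of "1,5,10-16,^13"-style parsing. Illustrative only:
 * the names and the 64-CPU limit are assumptions of this example.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_CPUS 64

static int parse_range(const char *str, uint64_t *map)
{
    char *buf = strdup(str), *tok, *saveptr = NULL;
    int rc = 0;

    if (!buf)
        return 1;

    /*
     * Each comma-separated token is "N", "A-B", or the same with a
     * leading '^', which means "clear these bits" instead of "set".
     */
    for (tok = strtok_r(buf, ",", &saveptr); tok;
         tok = strtok_r(NULL, ",", &saveptr)) {
        int excl = (*tok == '^');
        unsigned long a, b;
        char *end;

        if (excl)
            tok++;
        a = b = strtoul(tok, &end, 10);
        if (end == tok) { rc = 1; break; }   /* no digits at all */
        if (*end == '-') {                   /* "A-B" range */
            tok = end + 1;
            b = strtoul(tok, &end, 10);
            if (end == tok) { rc = 1; break; }
        }
        if (b < a || b >= MAX_CPUS) { rc = 1; break; }

        for (unsigned long i = a; i <= b; i++)
            if (excl)
                *map &= ~(UINT64_C(1) << i);
            else
                *map |= UINT64_C(1) << i;
    }

    free(buf);
    return rc;
}

int main(void)
{
    uint64_t map = 0;

    if (parse_range("1,5,10-16,^13", &map))
        return 1;
    printf("mask = %#" PRIx64 "\n", map);  /* bits 1,5,10-12,14-16 */
    return 0;
}

Tokens are applied left to right, so in "10-16,^13" the exclusion
clears bit 13 only after the range has set it; that is also the
ordering the man page example (c) above relies on.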