
[Xen-devel] [PATCH 6/9] xl: enable using ranges of pCPUs when manipulating cpupools



Right now, the xl sub-commands 'cpupool-cpu-add' and
'cpupool-cpu-remove' only accept the specification of a single
pCPU to be added to or removed from a cpupool.

With this change, they can deal with ranges, like "4-8",
or "node:1,12-18,^14". The syntax is exactly the same one
that is supported by the 'vcpu-pin' subcommand, and
specifying just one pCPU still works, of course.
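
For instance (the pool name 'mypool' below is purely
illustrative), all of the following become valid invocations:

  xl cpupool-cpu-add mypool 4
  xl cpupool-cpu-add mypool 1,5,10-16,^13
  xl cpupool-cpu-remove mypool node:0,nodes:2-3,^10-12,8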

This makes things more flexible and more consistent, and it
also improves error handling, as the pCPU range parsing
routine already present in xl is more reliable than a bare
call to atoi().

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Juergen Gross <JGross@xxxxxxxx>
---
 docs/man/xl.pod.1        |   25 ++++++++++--
 tools/libxl/xl_cmdimpl.c |   94 ++++++++++++++++++++--------------------------
 2 files changed, 62 insertions(+), 57 deletions(-)
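
On the error handling point above, here is a minimal,
self-contained sketch of the kind of parsing that cpurange_parse()
builds on. It is NOT the libxl code: it uses a plain uint64_t as the
cpumap (so at most 64 pCPUs), it leaves out the "node:"/"nodes:"
prefixes, and the helpers parse_range() and parse_cpu_spec() are made
up for illustration. It does show, though, why validated
strtol()-based range parsing beats a bare atoi():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_CPUS 64

/* Parse one token ("A", "A-B", "^A" or "^A-B"); set/clear bits in *map. */
static int parse_range(const char *tok, uint64_t *map)
{
    int set = 1;
    long a, b;
    char *endp;

    if (*tok == '^') {          /* "^..." removes CPUs from the map */
        set = 0;
        tok++;
    }

    a = strtol(tok, &endp, 10);
    if (endp == tok || a < 0 || a >= MAX_CPUS)
        return -1;              /* empty or out-of-range lower bound */

    if (*endp == '-') {         /* "A-B": parse the upper bound too */
        const char *s = endp + 1;

        b = strtol(s, &endp, 10);
        if (endp == s || b < a || b >= MAX_CPUS)
            return -1;          /* empty, inverted or out-of-range */
    } else {
        b = a;                  /* a single CPU is a degenerate range */
    }
    if (*endp != '\0')
        return -1;              /* trailing garbage, e.g. "4x" */

    for ( ; a <= b; a++) {
        if (set)
            *map |= 1ULL << a;
        else
            *map &= ~(1ULL << a);
    }
    return 0;
}

/* Split a spec like "1,5,10-16,^13" on ',' and fold each token in. */
static int parse_cpu_spec(const char *spec, uint64_t *map)
{
    char *buf = strdup(spec), *saveptr = NULL, *tok;
    int rc = 0;

    *map = 0;
    for (tok = strtok_r(buf, ",", &saveptr); tok;
         tok = strtok_r(NULL, ",", &saveptr)) {
        if (parse_range(tok, map)) {
            fprintf(stderr, "invalid range: '%s'\n", tok);
            rc = -1;
            break;
        }
    }
    free(buf);
    return rc;
}

int main(void)
{
    uint64_t map;
    int i;

    if (parse_cpu_spec("1,5,10-16,^13", &map))
        return 1;

    for (i = 0; i < MAX_CPUS; i++)
        if (map & (1ULL << i))
            printf("%d ", i);   /* prints: 1 5 10 11 12 14 15 16 */
    printf("\n");
    return 0;
}

Fed "1,5,10-16,^13", this prints "1 5 10 11 12 14 15 16"; a malformed
token like "4x" or an inverted range like "16-10" is rejected, where
atoi() would silently return 4 and 16 respectively.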

diff --git a/docs/man/xl.pod.1 b/docs/man/xl.pod.1
index c330016..ef0d763 100644
--- a/docs/man/xl.pod.1
+++ b/docs/man/xl.pod.1
@@ -1147,13 +1147,30 @@ This is possible only if no domain is active in the cpu-pool.
 
 Renames a cpu-pool to I<newname>.
 
-=item B<cpupool-cpu-add> I<cpu-pool> I<cpu-nr|node:node-nr>
+=item B<cpupool-cpu-add> I<cpu-pool> I<cpus|node:nodes>
 
-Adds a cpu or all cpus of a numa node to a cpu-pool.
+Adds I<cpus> or full NUMA I<nodes> to I<cpu-pool>. pCPUs and NUMA nodes
+can be specified as single pCPU/node IDs or as ranges.
 
-=item B<cpupool-cpu-remove> I<cpu-nr|node:node-nr>
+For example:
+
+ (a) xl cpupool-cpu-add mypool 4
+ (b) xl cpupool-cpu-add mypool 1,5,10-16,^13
+ (c) xl cpupool-cpu-add mypool node:0,nodes:2-3,^10-12,8
+
+means: in (a), adding pCPU 4 to mypool; in (b), adding pCPUs 1, 5,
+10, 11, 12, 14, 15 and 16; and in (c), adding all the pCPUs of NUMA
+nodes 0, 2 and 3, plus pCPU 8, but leaving out pCPUs 10, 11 and 12.
+
+All the specified pCPUs that can be added to the cpupool will be added
+to it. If some of them can't be (e.g., because they're already part of
+another cpupool), an error is reported for each one of them.
+
+=item B<cpupool-cpu-remove> I<cpu-pool> I<cpus|node:nodes>
 
-Removes a cpu or all cpus of a numa node from a cpu-pool.
+Removes I<cpus> or full NUMA I<nodes> from I<cpu-pool>. pCPUs and NUMA
+nodes can be specified as single pCPU/node IDs or as ranges, using the
+exact same syntax as in B<cpupool-cpu-add> above.
 
 =item B<cpupool-migrate> I<domain> I<cpu-pool>
 
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index 42b3954..c748ba0 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -760,7 +760,7 @@ static int update_cpumap_range(const char *str, libxl_bitmap *cpumap)
 * single cpus or as entire NUMA nodes) and turns it into the
  * corresponding libxl_bitmap (in cpumap).
  */
-static int vcpupin_parse(const char *cpu, libxl_bitmap *cpumap)
+static int cpurange_parse(const char *cpu, libxl_bitmap *cpumap)
 {
     char *ptr, *saveptr = NULL, *buf = strdup(cpu);
     int rc = 0;
@@ -870,7 +870,7 @@ static void parse_vcpu_affinity(libxl_domain_build_info *b_info,
                 exit(1);
             }
 
-            if (vcpupin_parse(buf, &vcpu_affinity_array[j]))
+            if (cpurange_parse(buf, &vcpu_affinity_array[j]))
                 exit(1);
 
             j++;
@@ -887,7 +887,7 @@ static void parse_vcpu_affinity(libxl_domain_build_info *b_info,
             exit(1);
         }
 
-        if (vcpupin_parse(buf, &vcpu_affinity_array[0]))
+        if (cpurange_parse(buf, &vcpu_affinity_array[0]))
             exit(1);
 
         for (i = 1; i < b_info->max_vcpus; i++) {
@@ -4979,7 +4979,7 @@ int main_vcpupin(int argc, char **argv)
      */
     if (!strcmp(hard_str, "-"))
         hard = NULL;
-    else if (vcpupin_parse(hard_str, hard))
+    else if (cpurange_parse(hard_str, hard))
         goto out;
     /*
      * Soft affinity is handled similarly. Only difference: we also want
@@ -4987,7 +4987,7 @@ int main_vcpupin(int argc, char **argv)
      */
     if (argc <= optind+3 || !strcmp(soft_str, "-"))
         soft = NULL;
-    else if (vcpupin_parse(soft_str, soft))
+    else if (cpurange_parse(soft_str, soft))
         goto out;
 
     if (dryrun_only) {
@@ -7325,44 +7325,38 @@ int main_cpupoolcpuadd(int argc, char **argv)
     int opt;
     const char *pool;
     uint32_t poolid;
-    int cpu;
-    int node;
-    int n;
+    libxl_bitmap cpumap;
+    int n, rc = 1;
 
     SWITCH_FOREACH_OPT(opt, "", NULL, "cpupool-cpu-add", 2) {
         /* No options */
     }
 
-    pool = argv[optind++];
-    node = -1;
-    cpu = -1;
-    if (strncmp(argv[optind], "node:", 5) == 0) {
-        node = atoi(argv[optind] + 5);
-    } else {
-        cpu = atoi(argv[optind]);
+    libxl_bitmap_init(&cpumap);
+    if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0)) {
+        fprintf(stderr, "Unable to allocate cpumap");
+        return 1;
     }
 
+    pool = argv[optind++];
+    if (cpurange_parse(argv[optind], &cpumap))
+        goto out;
+
     if (libxl_cpupool_qualifier_to_cpupoolid(ctx, pool, &poolid, NULL) ||
         !libxl_cpupoolid_is_valid(ctx, poolid)) {
         fprintf(stderr, "unknown cpupool \'%s\'\n", pool);
-        return -ERROR_FAIL;
-    }
-
-    if (cpu >= 0) {
-        return -libxl_cpupool_cpuadd(ctx, poolid, cpu);
+        goto out;
     }
 
-    if (libxl_cpupool_cpuadd_node(ctx, poolid, node, &n)) {
-        fprintf(stderr, "libxl_cpupool_cpuadd_node failed\n");
-        return -ERROR_FAIL;
-    }
+    n = libxl_cpupool_cpuadd_cpumap(ctx, poolid, &cpumap);
+    if (n != libxl_bitmap_count_set(&cpumap))
+        fprintf(stderr, "not all cpus have been added to the cpupool\n");
 
-    if (n > 0) {
-        return 0;
-    }
+    rc = 0;
 
-    fprintf(stderr, "no free cpu found\n");
-    return -ERROR_FAIL;
+out:
+    libxl_bitmap_dispose(&cpumap);
+    return rc;
 }
 
 int main_cpupoolcpuremove(int argc, char **argv)
@@ -7370,44 +7364,38 @@ int main_cpupoolcpuremove(int argc, char **argv)
     int opt;
     const char *pool;
     uint32_t poolid;
-    int cpu;
-    int node;
-    int n;
+    libxl_bitmap cpumap;
+    int n, rc = 1;
 
     SWITCH_FOREACH_OPT(opt, "", NULL, "cpupool-cpu-remove", 2) {
         /* No options */
     }
 
+    libxl_bitmap_init(&cpumap);
+    if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0)) {
+        fprintf(stderr, "Unable to allocate cpumap\n");
+        return 1;
+    }
+
     pool = argv[optind++];
-    node = -1;
-    cpu = -1;
-    if (strncmp(argv[optind], "node:", 5) == 0) {
-        node = atoi(argv[optind] + 5);
-    } else {
-        cpu = atoi(argv[optind]);
-    }
+    if (cpurange_parse(argv[optind], &cpumap))
+        goto out;
 
     if (libxl_cpupool_qualifier_to_cpupoolid(ctx, pool, &poolid, NULL) ||
         !libxl_cpupoolid_is_valid(ctx, poolid)) {
         fprintf(stderr, "unknown cpupool \'%s\'\n", pool);
-        return -ERROR_FAIL;
-    }
-
-    if (cpu >= 0) {
-        return -libxl_cpupool_cpuremove(ctx, poolid, cpu);
+        goto out;
     }
 
-    if (libxl_cpupool_cpuremove_node(ctx, poolid, node, &n)) {
-        fprintf(stderr, "libxl_cpupool_cpuremove_node failed\n");
-        return -ERROR_FAIL;
-    }
+    n = libxl_cpupool_cpuremove_cpumap(ctx, poolid, &cpumap);
+    if (n != libxl_bitmap_count_set(&cpumap))
+        fprintf(stderr, "not all cpus could be removed from the cpupool\n");
 
-    if (n == 0) {
-        fprintf(stderr, "no cpu of node found in cpupool\n");
-        return -ERROR_FAIL;
-    }
+    rc = 0;
 
-    return 0;
+out:
+    libxl_bitmap_dispose(&cpumap);
+    return rc;
 }
 
 int main_cpupoolmigrate(int argc, char **argv)


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

