
[Xen-devel] [PATCH 5/9] libxl: introduce libxl_cpupool_cpu{add,remove}_cpumap()



These add to (remove from) a cpupool all the pCPUs corresponding
to the bits that are set in the passed bitmap.

This is convenient for implementing, in xl, support for specifying
ranges of pCPUs to be added to (removed from) a cpupool in the
relevant commands.

While there, convert libxl_cpupool_cpu{add,remove} to use the
appropriate logging macro (LOGE()).
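
For illustration only (not part of this patch), here is a minimal sketch
of how a caller such as xl might use the new functions. The cpupool id (1)
and the pCPU range (4-7) are made-up values, and error handling is kept to
a bare minimum:

/* Hypothetical example, not part of the patch: move pCPUs 4-7 into
 * cpupool 1 in one go, using the new libxl_cpupool_cpuadd_cpumap(). */
#include <stdio.h>
#include <xentoollog.h>
#include <libxl.h>
#include <libxl_utils.h>

int main(void)
{
    xentoollog_logger_stdiostream *lg;
    libxl_ctx *ctx = NULL;
    libxl_bitmap cpumap;
    uint32_t poolid = 1;      /* made-up cpupool id */
    int cpu, moved, rc = 1;

    lg = xtl_createlogger_stdiostream(stderr, XTL_ERROR, 0);
    if (!lg || libxl_ctx_alloc(&ctx, LIBXL_VERSION, 0,
                               (xentoollog_logger *)lg))
        goto out;

    libxl_bitmap_init(&cpumap);
    if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0))  /* 0: size for all pCPUs */
        goto out;

    /* e.g., what xl would set after parsing a "4-7" range argument */
    for (cpu = 4; cpu <= 7; cpu++)
        libxl_bitmap_set(&cpumap, cpu);

    /* the new call: returns how many pCPUs were actually added */
    moved = libxl_cpupool_cpuadd_cpumap(ctx, poolid, &cpumap);
    printf("added %d pCPU(s) to cpupool %u\n", moved, poolid);

    libxl_bitmap_dispose(&cpumap);
    rc = 0;
 out:
    if (ctx) libxl_ctx_free(ctx);
    if (lg) xtl_logger_destroy((xentoollog_logger *)lg);
    return rc;
}

(Building such a snippet would need linking against libxenlight, plus
whichever library provides the xtl_* logger helpers on the Xen version
at hand.)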

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Juergen Gross <JGross@xxxxxxxx>
---
 tools/libxl/libxl.c |   36 ++++++++++++++++++++++++++++++++----
 tools/libxl/libxl.h |    4 ++++
 2 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 088786e..e06fe32 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -6343,17 +6343,31 @@ out:
 
 int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu)
 {
+    GC_INIT(ctx);
     int rc;
 
     rc = xc_cpupool_addcpu(ctx->xch, poolid, cpu);
     if (rc) {
-        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
-            "Error moving cpu to cpupool");
+        LOGE(ERROR, "Error moving cpu %d to cpupool", cpu);
+        GC_FREE;
         return ERROR_FAIL;
     }
+    GC_FREE;
     return 0;
 }
 
+int libxl_cpupool_cpuadd_cpumap(libxl_ctx *ctx, uint32_t poolid,
+                                const libxl_bitmap *cpumap)
+{
+    int c, ncpus = 0;
+
+    libxl_for_each_set_bit(c, *cpumap) {
+        if (!libxl_cpupool_cpuadd(ctx, poolid, c))
+            ncpus++;
+    }
+    return ncpus;
+}
+
 int libxl_cpupool_cpuadd_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus)
 {
     int rc = 0;
@@ -6388,17 +6402,31 @@ out:
 
 int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu)
 {
+    GC_INIT(ctx);
     int rc;
 
     rc = xc_cpupool_removecpu(ctx->xch, poolid, cpu);
     if (rc) {
-        LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
-            "Error removing cpu from cpupool");
+        LOGE(ERROR, "Error removing cpu %d from cpupool", cpu);
+        GC_FREE;
         return ERROR_FAIL;
     }
+    GC_FREE;
     return 0;
 }
 
+int libxl_cpupool_cpuremove_cpumap(libxl_ctx *ctx, uint32_t poolid,
+                                   const libxl_bitmap *cpumap)
+{
+    int c, ncpus = 0;
+
+    libxl_for_each_set_bit(c, *cpumap) {
+        if (!libxl_cpupool_cpuremove(ctx, poolid, c))
+            ncpus++;
+    }
+    return ncpus;
+}
+
 int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus)
 {
     int ret = 0;
diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index 6bbc52d..7661999 100644
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -1456,8 +1456,12 @@ int libxl_cpupool_destroy(libxl_ctx *ctx, uint32_t poolid);
 int libxl_cpupool_rename(libxl_ctx *ctx, const char *name, uint32_t poolid);
 int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu);
 int libxl_cpupool_cpuadd_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
+int libxl_cpupool_cpuadd_cpumap(libxl_ctx *ctx, uint32_t poolid,
+                                const libxl_bitmap *cpumap);
 int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
 int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
+int libxl_cpupool_cpuremove_cpumap(libxl_ctx *ctx, uint32_t poolid,
+                                   const libxl_bitmap *cpumap);
 int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
 int libxl_cpupool_info(libxl_ctx *ctx, libxl_cpupoolinfo *info, uint32_t poolid);
 




 

