
[Xen-devel] [PATCH 1/2] tools/libxc: Introduce XC_CPUPOOL_POOLID_ANY



Callers to xc_cpupool_create() can either request a specific pool id,
or request that Xen choose one for them.  At the moment, the
"automatic" selection is indicated by using a magic value, 0.  This is
undesirable both because it doesn't obviously have meaning, and
because '0' is a valid cpupool id (albeit one which at the moment
can't be changed).

Introduce a constant, XC_CPUPOOL_POOLID_ANY, to indicate this instead.
Have it be the default for the python bindings.
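
For illustration, a minimal sketch of how a caller might use the new
constant (the helper name, scheduler choice and error handling here
are placeholders, not part of this patch):

    #include <xenctrl.h>

    /* Sketch only: create a cpupool and let Xen pick the pool id. */
    static int create_any_cpupool(xc_interface *xch, uint32_t *out_poolid)
    {
        uint32_t poolid = XC_CPUPOOL_POOLID_ANY;  /* "any", not magic 0 */
        int rc = xc_cpupool_create(xch, &poolid, XEN_SCHEDULER_CREDIT);

        if ( rc )
            return rc;

        *out_poolid = poolid; /* xc_cpupool_create writes back the chosen id */
        return 0;
    }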

Manually translate it, even though it's the same underlying value,
because we don't yet have a reliable way of enforcing that these
values are the same.
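
As a purely illustrative sketch (not part of this patch) of what such
enforcement might eventually look like, assuming both constants were
visible in the same translation unit and a C11-capable toolchain, a
compile-time check along these lines would do:

    /* Illustrative only: tie the libxc constant to the sysctl one. */
    _Static_assert(XC_CPUPOOL_POOLID_ANY == XEN_SYSCTL_CPUPOOL_PAR_ANY,
                   "XC_CPUPOOL_POOLID_ANY must match XEN_SYSCTL_CPUPOOL_PAR_ANY");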

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
Reviewed-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
v2: Rebase over libxl.c-split series

CC: Ian Jackson <ian.jackson@xxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Juergen Gross <jgross@xxxxxxxx>
CC: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
CC: Ronald Rojas <ronladred@xxxxxxxxx>
---
 tools/libxc/include/xenctrl.h     | 2 ++
 tools/libxc/xc_cpupool.c          | 2 +-
 tools/libxl/libxl_cpupool.c       | 7 ++++++-
 tools/python/xen/lowlevel/xc/xc.c | 2 +-
 4 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 85d7fe5..927e373 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1052,6 +1052,8 @@ typedef struct xc_cpupoolinfo {
     xc_cpumap_t cpumap;
 } xc_cpupoolinfo_t;
 
+#define XC_CPUPOOL_POOLID_ANY 0xFFFFFFFF
+
 /**
  * Create a new cpupool.
  *
diff --git a/tools/libxc/xc_cpupool.c b/tools/libxc/xc_cpupool.c
index 70011d1..fbd8cc9 100644
--- a/tools/libxc/xc_cpupool.c
+++ b/tools/libxc/xc_cpupool.c
@@ -43,7 +43,7 @@ int xc_cpupool_create(xc_interface *xch,
 
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_CREATE;
-    sysctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+    sysctl.u.cpupool_op.cpupool_id = (*ppoolid == XC_CPUPOOL_POOLID_ANY) ?
         XEN_SYSCTL_CPUPOOL_PAR_ANY : *ppoolid;
     sysctl.u.cpupool_op.sched_id = sched_id;
     if ( (err = do_sysctl_save(xch, &sysctl)) != 0 )
diff --git a/tools/libxl/libxl_cpupool.c b/tools/libxl/libxl_cpupool.c
index f3d22b1..0ff8724 100644
--- a/tools/libxl/libxl_cpupool.c
+++ b/tools/libxl/libxl_cpupool.c
@@ -137,6 +137,10 @@ int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
     int i;
     xs_transaction_t t;
     char *uuid_string;
+    uint32_t xcpoolid;
+
+    /* Zero means "choose a poolid for me" */
+    xcpoolid = (*poolid) ? (*poolid) : XC_CPUPOOL_POOLID_ANY;
 
     uuid_string = libxl__uuid2string(gc, *uuid);
     if (!uuid_string) {
@@ -144,12 +148,13 @@ int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
         return ERROR_NOMEM;
     }
 
-    rc = xc_cpupool_create(ctx->xch, poolid, sched);
+    rc = xc_cpupool_create(ctx->xch, &xcpoolid, sched);
     if (rc) {
         LOGEV(ERROR, rc, "Could not create cpupool");
         GC_FREE;
         return ERROR_FAIL;
     }
+    *poolid = xcpoolid;
 
     libxl_for_each_bit(i, cpumap)
         if (libxl_bitmap_test(&cpumap, i)) {
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 39be1d5..9e93d49 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -1715,7 +1715,7 @@ static PyObject *pyxc_cpupool_create(XcObject *self,
                                      PyObject *args,
                                      PyObject *kwds)
 {
-    uint32_t cpupool = 0, sched = XEN_SCHEDULER_CREDIT;
+    uint32_t cpupool = XC_CPUPOOL_POOLID_ANY, sched = XEN_SCHEDULER_CREDIT;
 
     static char *kwd_list[] = { "pool", "sched", NULL };
 
-- 
2.1.4

