
[Xen-changelog] [xen-unstable] tools: cpupools: Support arbitrary numbers of physical cpus



# HG changeset patch
# User Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
# Date 1287682376 -3600
# Node ID ee0e2acc0d99e7a97f57ef055a6296bf33904247
# Parent  bfb3c97ef5071b99d33683ce9c8b7a6f727146bd
tools: cpupools: Support arbitrary numbers of physical cpus

To support arbitrary numbers of physical cpus it was necessary to include
the size of cpumaps in the xc interfaces for cpu pools.
The affected interfaces are:
  definition of xc_cpupoolinfo_t
  xc_cpupool_getinfo()
  xc_cpupool_freeinfo()
xc_cpupool_getinfo() and xc_cpupool_freeinfo() now allocate the needed
buffer themselves and return it to the caller.

Signed-off-by: juergen.gross@xxxxxxxxxxxxxx
Signed-off-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
---
 tools/libxc/xc_cpupool.c          |  144 ++++++++++++++++++++++----------------
 tools/libxc/xc_misc.c             |   14 +++
 tools/libxc/xenctrl.h             |   30 +++----
 tools/libxl/libxl.c               |   55 +++++++-------
 tools/libxl/libxl.h               |    3 +
 tools/libxl/libxl_utils.c         |    5 +
 tools/libxl/xl_cmdimpl.c          |   22 ++---
 tools/python/xen/lowlevel/xc/xc.c |  108 ++++++++++++----------------
 8 files changed, 206 insertions(+), 175 deletions(-)
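
With this change the libxc cpupool calls follow a get/free pattern:
xc_cpupool_getinfo() returns a malloc()ed xc_cpupoolinfo_t for the first
pool whose id is not below the requested one, or NULL at the end of the
id space. A minimal caller sketch (not part of the patch; it assumes only
the signatures introduced below, with error handling elided):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <xenctrl.h>

static void print_pools(xc_interface *xch)
{
    xc_cpupoolinfo_t *info;
    uint32_t poolid = 0;

    /* NULL marks the end of the cpupool id space (or an error). */
    while ( (info = xc_cpupool_getinfo(xch, poolid)) != NULL )
    {
        printf("pool %u: sched %u, %u domain(s), cpumap of %u bits\n",
               info->cpupool_id, info->sched_id, info->n_dom,
               info->cpumap_size);
        poolid = info->cpupool_id + 1;  /* continue after this pool */
        free(info);                     /* buffer was malloc()ed by libxc */
    }
}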

diff -r bfb3c97ef507 -r ee0e2acc0d99 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c  Thu Oct 21 18:14:50 2010 +0100
+++ b/tools/libxc/xc_cpupool.c  Thu Oct 21 18:32:56 2010 +0100
@@ -32,6 +32,11 @@ static int do_sysctl_save(xc_interface *
     } while ( (ret < 0) && (errno == EAGAIN) );
 
     return ret;
+}
+
+static int get_cpumap_size(xc_interface *xch)
+{
+    return (xc_get_max_cpus(xch) + 7) / 8;
 }
 
 int xc_cpupool_create(xc_interface *xch,
@@ -64,50 +69,61 @@ int xc_cpupool_destroy(xc_interface *xch
     return do_sysctl_save(xch, &sysctl);
 }
 
-int xc_cpupool_getinfo(xc_interface *xch, 
-                       uint32_t first_poolid,
-                       uint32_t n_max, 
-                       xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch, 
+                       uint32_t poolid)
 {
     int err = 0;
-    int p;
-    uint32_t poolid = first_poolid;
-    uint8_t local[sizeof (info->cpumap)];
-    DECLARE_SYSCTL;
-
-    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
-
-    for (p = 0; p < n_max; p++)
-    {
-        sysctl.cmd = XEN_SYSCTL_cpupool_op;
-        sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
-        sysctl.u.cpupool_op.cpupool_id = poolid;
-        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-        sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
-
-        if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
-        {
-            PERROR("Could not lock memory for Xen hypercall");
-            break;
-        }
-        err = do_sysctl_save(xch, &sysctl);
-        unlock_pages(xch, local, sizeof (local));
-
-        if ( err < 0 )
-            break;
-
-        info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
-        info->sched_id = sysctl.u.cpupool_op.sched_id;
-        info->n_dom = sysctl.u.cpupool_op.n_dom;
-        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
-        poolid = sysctl.u.cpupool_op.cpupool_id + 1;
-        info++;
-    }
-
-    if ( p == 0 )
-        return err;
-
-    return p;
+    xc_cpupoolinfo_t *info;
+    uint8_t *local;
+    int local_size;
+    int cpumap_size;
+    int size;
+    DECLARE_SYSCTL;
+
+    local_size = get_cpumap_size(xch);
+    if (!local_size)
+    {
+        PERROR("Could not get number of cpus");
+        return NULL;
+    }
+    local = alloca(local_size);
+    cpumap_size = (local_size + sizeof(*info->cpumap) - 1) / sizeof(*info->cpumap);
+    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * sizeof(*info->cpumap);
+    info = malloc(size);
+    if ( !info )
+        return NULL;
+
+    memset(info, 0, size);
+    info->cpumap_size = local_size * 8;
+    info->cpumap = (uint64_t *)(info + 1);
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
+
+    if ( (err = lock_pages(xch, local, local_size)) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        free(info);
+        return NULL;
+    }
+    err = do_sysctl_save(xch, &sysctl);
+    unlock_pages(xch, local, local_size);
+
+    if ( err < 0 )
+    {
+        free(info);
+        return NULL;
+    }
+
+    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+    info->sched_id = sysctl.u.cpupool_op.sched_id;
+    info->n_dom = sysctl.u.cpupool_op.n_dom;
+    bitmap_byte_to_64(info->cpumap, local, local_size * 8);
+
+    return info;
 }
 
 int xc_cpupool_addcpu(xc_interface *xch,
@@ -149,31 +165,41 @@ int xc_cpupool_movedomain(xc_interface *
     return do_sysctl_save(xch, &sysctl);
 }
 
-int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap)
+uint64_t * xc_cpupool_freeinfo(xc_interface *xch,
+                        int *cpusize)
 {
     int err;
-    uint8_t local[sizeof (*cpumap)];
-    DECLARE_SYSCTL;
+    uint8_t *local;
+    uint64_t *cpumap;
+    DECLARE_SYSCTL;
+
+    *cpusize = get_cpumap_size(xch);
+    if (*cpusize == 0)
+        return NULL;
+    local = alloca(*cpusize);
+    cpumap = calloc((*cpusize + sizeof(*cpumap) - 1) / sizeof(*cpumap), sizeof(*cpumap));
+    if (cpumap == NULL)
+        return NULL;
 
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
-
-    if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
+    sysctl.u.cpupool_op.cpumap.nr_cpus = *cpusize * 8;
+
+    if ( (err = lock_pages(xch, local, *cpusize)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
-        return err;
+        free(cpumap);
+        return NULL;
     }
 
     err = do_sysctl_save(xch, &sysctl);
-    unlock_pages(xch, local, sizeof (local));
-
-    if (err < 0)
-        return err;
-
-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
-
-    return 0;
-}
+    unlock_pages(xch, local, *cpusize);
+    bitmap_byte_to_64(cpumap, local, *cpusize * 8);
+
+    if (err >= 0)
+        return cpumap;
+
+    free(cpumap);
+    return NULL;
+}
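
The getinfo path above sizes its result as one block: the xc_cpupoolinfo_t
header and the cpumap bitmap share a single malloc() allocation, with
info->cpumap pointed just past the struct so that one free() releases
both. A standalone sketch of that layout trick, with hypothetical names
(map_holder is not part of the patch):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for xc_cpupoolinfo_t: header plus trailing bitmap. */
struct map_holder {
    uint32_t nbits;    /* number of valid bits in map */
    uint64_t *map;     /* points into the same allocation, past the header */
};

static struct map_holder *map_holder_alloc(unsigned int nbits)
{
    size_t words = (nbits + 63) / 64;
    struct map_holder *h = malloc(sizeof(*h) + words * sizeof(uint64_t));

    if ( !h )
        return NULL;
    memset(h, 0, sizeof(*h) + words * sizeof(uint64_t));
    h->nbits = nbits;
    h->map = (uint64_t *)(h + 1);  /* bitmap lives right after the struct */
    return h;                      /* a single free() releases both parts */
}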
diff -r bfb3c97ef507 -r ee0e2acc0d99 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c     Thu Oct 21 18:14:50 2010 +0100
+++ b/tools/libxc/xc_misc.c     Thu Oct 21 18:32:56 2010 +0100
@@ -21,6 +21,20 @@
 #include "xc_private.h"
 #include <xen/hvm/hvm_op.h>
 
+int xc_get_max_cpus(xc_interface *xch)
+{
+    static int max_cpus = 0;
+    xc_physinfo_t physinfo;
+
+    if ( max_cpus )
+        return max_cpus;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        max_cpus = physinfo.max_cpu_id + 1;
+
+    return max_cpus;
+}
+
 int xc_readconsolering(xc_interface *xch,
                        char *buffer,
                        unsigned int *pnr_chars,
diff -r bfb3c97ef507 -r ee0e2acc0d99 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Thu Oct 21 18:14:50 2010 +0100
+++ b/tools/libxc/xenctrl.h     Thu Oct 21 18:32:56 2010 +0100
@@ -216,6 +216,9 @@ typedef union
 } start_info_any_t;
 
 
+/* return maximum number of cpus the hypervisor supports */
+int xc_get_max_cpus(xc_interface *xch);
+
 int xc_domain_create(xc_interface *xch,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
@@ -535,7 +538,8 @@ typedef struct xc_cpupoolinfo {
     uint32_t cpupool_id;
     uint32_t sched_id;
     uint32_t n_dom;
-    uint64_t cpumap;
+    uint32_t cpumap_size;    /* max number of cpus in map */
+    uint64_t *cpumap;
 } xc_cpupoolinfo_t;
 
 /**
@@ -564,15 +568,11 @@ int xc_cpupool_destroy(xc_interface *xch
  * Get cpupool info. Returns info for up to the specified number of cpupools
  * starting at the given id.
  * @parm xc_handle a handle to an open hypervisor interface
- * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
- */
-int xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t first_poolid,
-                       uint32_t n_max,
-                       xc_cpupoolinfo_t *info);
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (obtained by malloc)
+ */
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                       uint32_t poolid);
 
 /**
  * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -614,11 +614,11 @@ int xc_cpupool_movedomain(xc_interface *
  * Return map of cpus not in any cpupool.
  *
  * @parm xc_handle a handle to an open hypervisor interface
- * @parm cpumap pointer where to store the cpumap
- * return 0 on success, -1 on failure
- */
-int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap);
+ * @parm cpusize where to store array size in bytes
+ * return cpumap array on success, NULL else
+ */
+uint64_t *xc_cpupool_freeinfo(xc_interface *xch,
+                        int *cpusize);
 
 
 /*
diff -r bfb3c97ef507 -r ee0e2acc0d99 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c       Thu Oct 21 18:14:50 2010 +0100
+++ b/tools/libxl/libxl.c       Thu Oct 21 18:32:56 2010 +0100
@@ -609,27 +609,31 @@ int libxl_domain_info(libxl_ctx *ctx, li
 
 libxl_poolinfo * libxl_list_pool(libxl_ctx *ctx, int *nb_pool)
 {
-    libxl_poolinfo *ptr;
-    int i, ret;
-    xc_cpupoolinfo_t info[256];
-    int size = 256;
-
-    ptr = calloc(size, sizeof(libxl_poolinfo));
-    if (!ptr) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
-        return NULL;
-    }
-
-    ret = xc_cpupool_getinfo(ctx->xch, 0, 256, info);
-    if (ret<0) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting cpupool info");
-        return NULL;
-    }
-
-    for (i = 0; i < ret; i++) {
-        ptr[i].poolid = info[i].cpupool_id;
-    }
-    *nb_pool = ret;
+    libxl_poolinfo *ptr, *tmp;
+    int i;
+    xc_cpupoolinfo_t *info;
+    uint32_t poolid;
+
+    ptr = NULL;
+
+    poolid = 0;
+    for (i = 0;; i++) {
+        info = xc_cpupool_getinfo(ctx->xch, poolid);
+        if (info == NULL)
+            break;
+        tmp = realloc(ptr, (i + 1) * sizeof(libxl_poolinfo));
+        if (!tmp) {
+            LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
+            free(ptr);
+            return NULL;
+        }
+        ptr = tmp;
+        ptr[i].poolid = info->cpupool_id;
+        poolid = info->cpupool_id + 1;
+        free(info);
+    }
+
+    *nb_pool = i;
     return ptr;
 }
 
@@ -3207,24 +3211,19 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
     libxl_vcpuinfo *ptr, *ret;
     xc_domaininfo_t domaininfo;
     xc_vcpuinfo_t vcpuinfo;
-    xc_physinfo_t physinfo = { 0 };
     unsigned num_cpuwords;
 
     if (xc_domain_getinfolist(ctx->xch, domid, 1, &domaininfo) != 1) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting infolist");
         return NULL;
     }
-    if (xc_physinfo(ctx->xch, &physinfo) == -1) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting physinfo");
-        return NULL;
-    }
-    *nrcpus = physinfo.max_cpu_id + 1;
+    *nrcpus = xc_get_max_cpus(ctx->xch);
     ret = ptr = calloc(domaininfo.max_vcpu_id + 1, sizeof (libxl_vcpuinfo));
     if (!ptr) {
         return NULL;
     }
 
-    num_cpuwords = ((physinfo.max_cpu_id + 64) / 64);
+    num_cpuwords = ((*nrcpus + 63) / 64);
     for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
         ptr->cpumap = malloc(num_cpuwords * sizeof(*ptr->cpumap));
         if (!ptr->cpumap) {
diff -r bfb3c97ef507 -r ee0e2acc0d99 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h       Thu Oct 21 18:14:50 2010 +0100
+++ b/tools/libxl/libxl.h       Thu Oct 21 18:32:56 2010 +0100
@@ -248,6 +248,9 @@ int libxl_domain_shutdown(libxl_ctx *ctx
 int libxl_domain_shutdown(libxl_ctx *ctx, uint32_t domid, int req);
 int libxl_domain_destroy(libxl_ctx *ctx, uint32_t domid, int force);
 int libxl_domain_preserve(libxl_ctx *ctx, uint32_t domid, libxl_domain_create_info *info, const char *name_suffix, libxl_uuid new_uuid);
+
+/* get max. number of cpus supported by hypervisor */
+int libxl_get_max_cpus(libxl_ctx *ctx);
 
 /*
  * Run the configured bootloader for a PV domain and update
diff -r bfb3c97ef507 -r ee0e2acc0d99 tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c Thu Oct 21 18:14:50 2010 +0100
+++ b/tools/libxl/libxl_utils.c Thu Oct 21 18:32:56 2010 +0100
@@ -675,3 +675,8 @@ out:
     libxl__free_all(&gc);
     return rc;
 }
+
+int libxl_get_max_cpus(libxl_ctx *ctx)
+{
+    return xc_get_max_cpus(ctx->xch);
+}
diff -r bfb3c97ef507 -r ee0e2acc0d99 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c  Thu Oct 21 18:14:50 2010 +0100
+++ b/tools/libxl/xl_cmdimpl.c  Thu Oct 21 18:32:56 2010 +0100
@@ -3639,12 +3639,11 @@ static void vcpupin(char *d, const char 
 static void vcpupin(char *d, const char *vcpu, char *cpu)
 {
     libxl_vcpuinfo *vcpuinfo;
-    libxl_physinfo physinfo;
     uint64_t *cpumap = NULL;
 
     uint32_t vcpuid, cpuida, cpuidb;
     char *endptr, *toka, *tokb;
-    int i, nb_vcpu, cpusize;
+    int i, nb_vcpu, cpusize, cpumapsize;
 
     vcpuid = strtoul(vcpu, &endptr, 10);
     if (vcpu == endptr) {
@@ -3657,12 +3656,13 @@ static void vcpupin(char *d, const char 
 
     find_domain(d);
 
-    if (libxl_get_physinfo(&ctx, &physinfo) != 0) {
-        fprintf(stderr, "libxl_get_physinfo failed.\n");
+    if ((cpusize = libxl_get_max_cpus(&ctx)) == 0) {
+        fprintf(stderr, "libxl_get_max_cpus failed.\n");
         goto vcpupin_out1;
     }
-
-    cpumap = calloc(physinfo.max_cpu_id + 1, sizeof (uint64_t));
+    cpumapsize = (cpusize + sizeof (uint64_t) - 1) / sizeof (uint64_t);
+
+    cpumap = calloc(cpumapsize, sizeof (uint64_t));
     if (!cpumap) {
         goto vcpupin_out1;
     }
@@ -3690,24 +3690,24 @@ static void vcpupin(char *d, const char 
         }
     }
     else {
-        memset(cpumap, -1, sizeof (uint64_t) * (physinfo.max_cpu_id + 1));
+        memset(cpumap, -1, sizeof (uint64_t) * cpumapsize);
     }
 
     if (vcpuid != -1) {
         if (libxl_set_vcpuaffinity(&ctx, domid, vcpuid,
-                                   cpumap, physinfo.max_cpu_id + 1) == -1) {
+                                   cpumap, cpusize) == -1) {
             fprintf(stderr, "Could not set affinity for vcpu `%u'.\n", vcpuid);
         }
     }
     else {
-        if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &cpusize))) {
+        if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &i))) {
             fprintf(stderr, "libxl_list_vcpu failed.\n");
             goto vcpupin_out;
         }
         for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
             if (libxl_set_vcpuaffinity(&ctx, domid, vcpuinfo->vcpuid,
-                                       cpumap, physinfo.max_cpu_id + 1) == -1) {
-                fprintf(stderr, "libxl_list_vcpu failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
+                                       cpumap, cpusize) == -1) {
+                fprintf(stderr, "libxl_set_vcpuaffinity failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
             }
         }
     }
diff -r bfb3c97ef507 -r ee0e2acc0d99 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Oct 21 18:14:50 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Thu Oct 21 18:32:56 2010 +0100
@@ -229,7 +229,6 @@ static PyObject *pyxc_vcpu_setaffinity(X
     uint64_t  *cpumap;
     PyObject *cpulist = NULL;
     int nr_cpus, size;
-    xc_physinfo_t info = {0}; 
     uint64_t cpumap_size = sizeof(*cpumap); 
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
@@ -238,10 +237,9 @@ static PyObject *pyxc_vcpu_setaffinity(X
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
-    if ( xc_physinfo(self->xc_handle, &info) != 0 )
-        return pyxc_error_to_exception(self->xc_handle);
-  
-    nr_cpus = info.nr_cpus;
+    nr_cpus = xc_get_max_cpus(self->xc_handle);
+    if ( nr_cpus == 0 )
+        return pyxc_error_to_exception(self->xc_handle);
 
     size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
     cpumap = malloc(cpumap_size * size);
@@ -389,7 +387,6 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
     int rc, i;
     uint64_t *cpumap;
     int nr_cpus, size;
-    xc_physinfo_t pinfo = { 0 };
     uint64_t cpumap_size = sizeof(*cpumap);
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
@@ -398,9 +395,9 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
                                       &dom, &vcpu) )
         return NULL;
 
-    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
-        return pyxc_error_to_exception(self->xc_handle);
-    nr_cpus = pinfo.nr_cpus;
+    nr_cpus = xc_get_max_cpus(self->xc_handle);
+    if ( nr_cpus == 0 )
+        return pyxc_error_to_exception(self->xc_handle);
 
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
@@ -1906,22 +1903,23 @@ static PyObject *pyxc_dom_set_memshr(XcO
     return zero;
 }
 
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
 {
     PyObject *cpulist = NULL;
-    uint32_t i;
+    int i;
 
     cpulist = PyList_New(0);
-    for ( i = 0; cpumap != 0; i++ )
+    for ( i = 0; i < cpusize; i++ )
     {
-        if ( cpumap & 1 )
+        if ( *cpumap & (1ULL << (i % 64)) )
         {
             PyObject* pyint = PyInt_FromLong(i);
 
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap >>= 1;
+        if ( (i % 64) == 63 )
+            cpumap++;
     }
     return cpulist;
 }
@@ -1959,54 +1957,38 @@ static PyObject *pyxc_cpupool_destroy(Xc
     return zero;
 }
 
-static PyObject *pyxc_cpupool_getinfo(XcObject *self,
-                                      PyObject *args,
-                                      PyObject *kwds)
+static PyObject *pyxc_cpupool_getinfo(XcObject *self)
 {
     PyObject *list, *info_dict;
 
-    uint32_t first_pool = 0;
-    int max_pools = 1024, nr_pools, i;
+    uint32_t pool;
     xc_cpupoolinfo_t *info;
 
-    static char *kwd_list[] = { "first_pool", "max_pools", NULL };
-
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
-                                      &first_pool, &max_pools) )
-        return NULL;
-
-    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
-    if (info == NULL)
-        return PyErr_NoMemory();
-
-    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
-    if (nr_pools < 0)
+    list = PyList_New(0);
+    for (pool = 0;;)
     {
-        free(info);
-        return pyxc_error_to_exception(self->xc_handle);
-    }
-
-    list = PyList_New(nr_pools);
-    for ( i = 0 ; i < nr_pools; i++ )
-    {
+        info = xc_cpupool_getinfo(self->xc_handle, pool);
+        if (info == NULL)
+            break;
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:N}",
-            "cpupool",         (int)info[i].cpupool_id,
-            "sched",           info[i].sched_id,
-            "n_dom",           info[i].n_dom,
-            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
+            "cpupool",         (int)info->cpupool_id,
+            "sched",           info->sched_id,
+            "n_dom",           info->n_dom,
+            "cpulist",         cpumap_to_cpulist(info->cpumap,
+                                                 info->cpumap_size));
+        pool = info->cpupool_id + 1;
+        free(info);
+
         if ( info_dict == NULL )
         {
             Py_DECREF(list);
-            if ( info_dict != NULL ) { Py_DECREF(info_dict); }
-            free(info);
             return NULL;
         }
-        PyList_SetItem(list, i, info_dict);
-    }
-
-    free(info);
+
+        PyList_Append(list, info_dict);
+        Py_DECREF(info_dict);
+    }
 
     return list;
 }
@@ -2072,12 +2054,19 @@ static PyObject *pyxc_cpupool_movedomain
 
 static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
 {
-    uint64_t cpumap;
-
-    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
-        return pyxc_error_to_exception(self->xc_handle);
-
-    return cpumap_to_cpulist(cpumap);
+    uint64_t *cpumap;
+    int mapsize;
+    PyObject *info = NULL;
+
+    cpumap = xc_cpupool_freeinfo(self->xc_handle, &mapsize);
+    if (!cpumap)
+        return pyxc_error_to_exception(self->xc_handle);
+
+    info = cpumap_to_cpulist(cpumap, mapsize * 8);
+
+    free(cpumap);
+
+    return info;
 }
 
 static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,
@@ -2832,14 +2821,9 @@ static PyMethodDef pyxc_methods[] = {
 
     { "cpupool_getinfo",
       (PyCFunction)pyxc_cpupool_getinfo,
-      METH_VARARGS | METH_KEYWORDS, "\n"
+      METH_NOARGS, "\n"
       "Get information regarding a set of cpupools, in increasing id order.\n"
-      " first_pool [int, 0]:    First cpupool to retrieve info about.\n"
-      " max_pools  [int, 1024]: Maximum number of cpupools to retrieve info"
-      " about.\n\n"
-      "Returns: [list of dicts] if list length is less than 'max_pools'\n"
-      "         parameter then there was an error, or the end of the\n"
-      "         cpupool-id space was reached.\n"
+      "Returns: [list of dicts]\n"
       " pool     [int]: Identifier of cpupool to which this info pertains\n"
       " sched    [int]:  Scheduler used for this cpupool\n"
       " n_dom    [int]:  Number of Domains in this cpupool\n"
