# HG changeset patch
# User Juergen Gross
# Date 1286536603 -7200
# Node ID fc17462ba612d67069c1110f8cca3b1adb8c233b
# Parent  a33886146b45da46a5161a7ebed4d2f607642aee
Support arbitrary numbers of physical cpus for cpupools in tools

To be able to support arbitrary numbers of physical cpus it was
necessary to include the size of cpumaps in the xc-interfaces for
cpu pools. The affected interfaces are:

  - the definition of xc_cpupoolinfo_t
  - xc_cpupool_getinfo()
  - xc_cpupool_freeinfo()

xc_cpupool_getinfo() and xc_cpupool_freeinfo() now allocate the
needed buffer themselves and return it to the caller.

Signed-off-by: juergen.gross@xxxxxxxxxxxxxx
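The reworked interfaces hand ownership of the returned buffer to the
caller. A minimal sketch of the intended calling convention
(illustrative only, not part of this patch; it assumes an application
that already holds an open xc_interface handle):

    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* List all cpupools via the reworked xc_cpupool_getinfo(). */
    static void list_pools(xc_interface *xch)
    {
        xc_cpupoolinfo_t *info;
        uint32_t poolid = 0;

        /* Each call returns a malloc()ed info block with the cpumap
         * appended to the structure, or NULL past the last pool or
         * on error. */
        while ( (info = xc_cpupool_getinfo(xch, poolid)) != NULL )
        {
            printf("pool %u: sched %u, %u domain(s), cpumap of %u bits\n",
                   info->cpupool_id, info->sched_id, info->n_dom,
                   info->cpumap_size);
            poolid = info->cpupool_id + 1;  /* pool ids need not be dense */
            free(info);                     /* one free() releases the map too */
        }
    }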
diff -r a33886146b45 -r fc17462ba612 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c Fri Oct 08 11:41:57 2010 +0100
+++ b/tools/libxc/xc_cpupool.c Fri Oct 08 13:16:43 2010 +0200
@@ -34,6 +34,11 @@
     return ret;
 }
 
+static int get_cpumap_size(xc_interface *xch)
+{
+    return (xc_get_max_cpus(xch) + 7) / 8;
+}
+
 int xc_cpupool_create(xc_interface *xch,
                       uint32_t *ppoolid,
                       uint32_t sched_id)
@@ -64,50 +69,61 @@
     return do_sysctl_save(xch, &sysctl);
 }
 
-int xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t first_poolid,
-                       uint32_t n_max,
-                       xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                       uint32_t poolid)
 {
     int err = 0;
-    int p;
-    uint32_t poolid = first_poolid;
-    uint8_t local[sizeof (info->cpumap)];
+    xc_cpupoolinfo_t *info;
+    uint8_t *local;
+    int local_size;
+    int cpumap_size;
+    int size;
     DECLARE_SYSCTL;
 
-    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+    local_size = get_cpumap_size(xch);
+    if (!local_size)
+    {
+        PERROR("Could not get number of cpus");
+        return NULL;
+    }
+    local = alloca(local_size);
+    cpumap_size = (local_size + sizeof(*info->cpumap) - 1) / sizeof(*info->cpumap);
+    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * sizeof(*info->cpumap);
+    info = malloc(size);
+    if ( !info )
+        return NULL;
 
-    for (p = 0; p < n_max; p++)
+    memset(info, 0, size);
+    info->cpumap_size = local_size * 8;
+    info->cpumap = (uint64_t *)(info + 1);
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
+
+    if ( (err = lock_pages(local, local_size)) != 0 )
     {
-        sysctl.cmd = XEN_SYSCTL_cpupool_op;
-        sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
-        sysctl.u.cpupool_op.cpupool_id = poolid;
-        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-        sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+        PERROR("Could not lock memory for Xen hypercall");
+        free(info);
+        return NULL;
+    }
+    err = do_sysctl_save(xch, &sysctl);
+    unlock_pages(local, local_size);
 
-        if ( (err = lock_pages(local, sizeof(local))) != 0 )
-        {
-            PERROR("Could not lock memory for Xen hypercall");
-            break;
-        }
-        err = do_sysctl_save(xch, &sysctl);
-        unlock_pages(local, sizeof (local));
-
-        if ( err < 0 )
-            break;
-
-        info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
-        info->sched_id = sysctl.u.cpupool_op.sched_id;
-        info->n_dom = sysctl.u.cpupool_op.n_dom;
-        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
-        poolid = sysctl.u.cpupool_op.cpupool_id + 1;
-        info++;
+    if ( err < 0 )
+    {
+        free(info);
+        return NULL;
     }
 
-    if ( p == 0 )
-        return err;
+    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+    info->sched_id = sysctl.u.cpupool_op.sched_id;
+    info->n_dom = sysctl.u.cpupool_op.n_dom;
+    bitmap_byte_to_64(info->cpumap, local, local_size * 8);
 
-    return p;
+    return info;
 }
 
 int xc_cpupool_addcpu(xc_interface *xch,
@@ -149,31 +165,41 @@
     return do_sysctl_save(xch, &sysctl);
 }
 
-int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap)
+uint64_t * xc_cpupool_freeinfo(xc_interface *xch,
+                        int *cpusize)
 {
     int err;
-    uint8_t local[sizeof (*cpumap)];
+    uint8_t *local;
+    uint64_t *cpumap;
     DECLARE_SYSCTL;
+
+    *cpusize = get_cpumap_size(xch);
+    if (*cpusize == 0)
+        return NULL;
+    local = alloca(*cpusize);
+    cpumap = calloc((*cpusize + sizeof(*cpumap) - 1) / sizeof(*cpumap), sizeof(*cpumap));
+    if (cpumap == NULL)
+        return NULL;
 
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+    sysctl.u.cpupool_op.cpumap.nr_cpus = *cpusize * 8;
 
-    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    if ( (err = lock_pages(local, *cpusize)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
-        return err;
+        free(cpumap);
+        return NULL;
    }
 
     err = do_sysctl_save(xch, &sysctl);
-    unlock_pages(local, sizeof (local));
+    unlock_pages(local, *cpusize);
+    bitmap_byte_to_64(cpumap, local, *cpusize * 8);
 
-    if (err < 0)
-        return err;
+    if (err >= 0)
+        return cpumap;
 
-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
-
-    return 0;
+    free(cpumap);
+    return NULL;
 }
diff -r a33886146b45 -r fc17462ba612 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c Fri Oct 08 11:41:57 2010 +0100
+++ b/tools/libxc/xc_misc.c Fri Oct 08 13:16:43 2010 +0200
@@ -20,6 +20,20 @@
 
 #include "xc_private.h"
 #include
+
+int xc_get_max_cpus(xc_interface *xch)
+{
+    static int max_cpus = 0;
+    xc_physinfo_t physinfo;
+
+    if ( max_cpus )
+        return max_cpus;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        max_cpus = physinfo.max_cpu_id + 1;
+
+    return max_cpus;
+}
 
 int xc_readconsolering(xc_interface *xch,
                        char **pbuffer,
diff -r a33886146b45 -r fc17462ba612 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Fri Oct 08 11:41:57 2010 +0100
+++ b/tools/libxc/xenctrl.h Fri Oct 08 13:16:43 2010 +0200
@@ -215,6 +215,9 @@
     start_info_t s;
 } start_info_any_t;
 
+
+/* return maximum number of cpus the hypervisor supports */
+int xc_get_max_cpus(xc_interface *xch);
 
 int xc_domain_create(xc_interface *xch,
                      uint32_t ssidref,
@@ -535,7 +538,8 @@
     uint32_t cpupool_id;
     uint32_t sched_id;
     uint32_t n_dom;
-    uint64_t cpumap;
+    uint32_t cpumap_size;    /* max number of cpus in map */
+    uint64_t *cpumap;
 } xc_cpupoolinfo_t;
 
 /**
@@ -564,15 +568,11 @@
 * Get cpupool info. Returns info for up to the specified number of cpupools
 * starting at the given id.
 * @parm xc_handle a handle to an open hypervisor interface
- * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (obtained by malloc)
 */
-int xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t first_poolid,
-                       uint32_t n_max,
-                       xc_cpupoolinfo_t *info);
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                       uint32_t poolid);
 
 /**
 * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -614,11 +614,11 @@
 * Return map of cpus not in any cpupool.
 *
 * @parm xc_handle a handle to an open hypervisor interface
- * @parm cpumap pointer where to store the cpumap
- * return 0 on success, -1 on failure
+ * @parm cpusize where to store array size in bytes
+ * return cpumap array on success, NULL else
 */
-int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap);
+uint64_t *xc_cpupool_freeinfo(xc_interface *xch,
+                        int *cpusize);
 
 
 /*
diff -r a33886146b45 -r fc17462ba612 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c Fri Oct 08 11:41:57 2010 +0100
+++ b/tools/libxl/libxl.c Fri Oct 08 13:16:43 2010 +0200
@@ -609,27 +609,31 @@
 
 libxl_poolinfo * libxl_list_pool(libxl_ctx *ctx, int *nb_pool)
 {
-    libxl_poolinfo *ptr;
-    int i, ret;
-    xc_cpupoolinfo_t info[256];
-    int size = 256;
+    libxl_poolinfo *ptr, *tmp;
+    int i;
+    xc_cpupoolinfo_t *info;
+    uint32_t poolid;
 
-    ptr = calloc(size, sizeof(libxl_poolinfo));
-    if (!ptr) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
-        return NULL;
+    ptr = NULL;
+
+    poolid = 0;
+    for (i = 0;; i++) {
+        info = xc_cpupool_getinfo(ctx->xch, poolid);
+        if (info == NULL)
+            break;
+        tmp = realloc(ptr, (i + 1) * sizeof(libxl_poolinfo));
+        if (!tmp) {
+            LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
+            free(ptr);
+            return NULL;
+        }
+        ptr = tmp;
+        ptr[i].poolid = info->cpupool_id;
+        poolid = info->cpupool_id + 1;
+        free(info);
     }
 
-    ret = xc_cpupool_getinfo(ctx->xch, 0, 256, info);
-    if (ret<0) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting cpupool info");
-        return NULL;
-    }
-
-    for (i = 0; i < ret; i++) {
-        ptr[i].poolid = info[i].cpupool_id;
-    }
-    *nb_pool = ret;
+    *nb_pool = i;
     return ptr;
 }
@@ -3203,24 +3207,19 @@
     libxl_vcpuinfo *ptr, *ret;
     xc_domaininfo_t domaininfo;
     xc_vcpuinfo_t vcpuinfo;
-    xc_physinfo_t physinfo = { 0 };
     unsigned num_cpuwords;
 
     if (xc_domain_getinfolist(ctx->xch, domid, 1, &domaininfo) != 1) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting infolist");
         return NULL;
     }
-    if (xc_physinfo(ctx->xch, &physinfo) == -1) {
-        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting physinfo");
-        return NULL;
-    }
-    *nrcpus = physinfo.max_cpu_id + 1;
+    *nrcpus = xc_get_max_cpus(ctx->xch);
     ret = ptr = calloc(domaininfo.max_vcpu_id + 1, sizeof (libxl_vcpuinfo));
     if (!ptr) {
         return NULL;
     }
 
-    num_cpuwords = ((physinfo.max_cpu_id + 64) / 64);
+    num_cpuwords = ((*nrcpus + 63) / 64);
     for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
         ptr->cpumap = malloc(num_cpuwords * sizeof(*ptr->cpumap));
         if (!ptr->cpumap) {
diff -r a33886146b45 -r fc17462ba612 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h Fri Oct 08 11:41:57 2010 +0100
+++ b/tools/libxl/libxl.h Fri Oct 08 13:16:43 2010 +0200
@@ -249,6 +249,9 @@
 int libxl_domain_destroy(libxl_ctx *ctx, uint32_t domid, int force);
 int libxl_domain_preserve(libxl_ctx *ctx, uint32_t domid, libxl_domain_create_info *info, const char *name_suffix, libxl_uuid new_uuid);
 
+/* get max. number of cpus supported by hypervisor */
+int libxl_get_max_cpus(libxl_ctx *ctx);
+
 /*
 * Run the configured bootloader for a PV domain and update
 * info->kernel, info->u.pv.ramdisk and info->u.pv.cmdline as
diff -r a33886146b45 -r fc17462ba612 tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c Fri Oct 08 11:41:57 2010 +0100
+++ b/tools/libxl/libxl_utils.c Fri Oct 08 13:16:43 2010 +0200
@@ -675,3 +675,8 @@
     libxl__free_all(&gc);
     return rc;
 }
+
+int libxl_get_max_cpus(libxl_ctx *ctx)
+{
+    return xc_get_max_cpus(ctx->xch);
+}
diff -r a33886146b45 -r fc17462ba612 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c Fri Oct 08 11:41:57 2010 +0100
+++ b/tools/libxl/xl_cmdimpl.c Fri Oct 08 13:16:43 2010 +0200
@@ -3620,12 +3620,11 @@
 static void vcpupin(char *d, const char *vcpu, char *cpu)
 {
     libxl_vcpuinfo *vcpuinfo;
-    libxl_physinfo physinfo;
     uint64_t *cpumap = NULL;
 
     uint32_t vcpuid, cpuida, cpuidb;
     char *endptr, *toka, *tokb;
-    int i, nb_vcpu, cpusize;
+    int i, nb_vcpu, cpusize, cpumapsize;
 
     vcpuid = strtoul(vcpu, &endptr, 10);
     if (vcpu == endptr) {
@@ -3638,12 +3637,13 @@
 
     find_domain(d);
 
-    if (libxl_get_physinfo(&ctx, &physinfo) != 0) {
-        fprintf(stderr, "libxl_get_physinfo failed.\n");
+    if ((cpusize = libxl_get_max_cpus(&ctx)) == 0) {
+        fprintf(stderr, "libxl_get_max_cpus failed.\n");
         goto vcpupin_out1;
     }
-
-    cpumap = calloc(physinfo.max_cpu_id + 1, sizeof (uint64_t));
+    cpumapsize = (cpusize + sizeof (uint64_t) - 1) / sizeof (uint64_t);
+
+    cpumap = calloc(cpumapsize, sizeof (uint64_t));
     if (!cpumap) {
         goto vcpupin_out1;
     }
@@ -3671,24 +3671,24 @@
         }
     }
     else {
-        memset(cpumap, -1, sizeof (uint64_t) * (physinfo.max_cpu_id + 1));
+        memset(cpumap, -1, sizeof (uint64_t) * cpumapsize);
     }
 
     if (vcpuid != -1) {
         if (libxl_set_vcpuaffinity(&ctx, domid, vcpuid,
-                                   cpumap, physinfo.max_cpu_id + 1) == -1) {
+                                   cpumap, cpusize) == -1) {
             fprintf(stderr, "Could not set affinity for vcpu `%u'.\n", vcpuid);
         }
     }
     else {
-        if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &cpusize))) {
+        if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &i))) {
             fprintf(stderr, "libxl_list_vcpu failed.\n");
             goto vcpupin_out;
         }
         for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
             if (libxl_set_vcpuaffinity(&ctx, domid, vcpuinfo->vcpuid,
-                                       cpumap, physinfo.max_cpu_id + 1) == -1) {
-                fprintf(stderr, "libxl_list_vcpu failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
+                                       cpumap, cpusize) == -1) {
+                fprintf(stderr, "libxl_set_vcpuaffinity failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
             }
         }
     }
diff -r a33886146b45 -r fc17462ba612 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Fri Oct 08 11:41:57 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Fri Oct 08 13:16:43 2010 +0200
@@ -229,7 +229,6 @@
     uint64_t *cpumap;
     PyObject *cpulist = NULL;
     int nr_cpus, size;
-    xc_physinfo_t info = {0};
     uint64_t cpumap_size = sizeof(*cpumap);
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
@@ -238,10 +237,9 @@
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
-    if ( xc_physinfo(self->xc_handle, &info) != 0 )
+    nr_cpus = xc_get_max_cpus(self->xc_handle);
+    if ( nr_cpus == 0 )
         return pyxc_error_to_exception(self->xc_handle);
-
-    nr_cpus = info.nr_cpus;
 
     size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
     cpumap = malloc(cpumap_size * size);
@@ -389,7 +387,6 @@
     int rc, i;
     uint64_t *cpumap;
     int nr_cpus, size;
-    xc_physinfo_t pinfo = { 0 };
     uint64_t cpumap_size = sizeof(*cpumap);
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
@@ -398,9 +395,9 @@
                                       &dom, &vcpu) )
         return NULL;
 
-    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 )
+    nr_cpus = xc_get_max_cpus(self->xc_handle);
+    if ( nr_cpus == 0 )
         return pyxc_error_to_exception(self->xc_handle);
-    nr_cpus = pinfo.nr_cpus;
 
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
@@ -1906,22 +1903,23 @@
     return zero;
 }
 
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
 {
     PyObject *cpulist = NULL;
-    uint32_t i;
+    int i;
 
     cpulist = PyList_New(0);
-    for ( i = 0; cpumap != 0; i++ )
+    for ( i = 0; i < cpusize; i++ )
     {
-        if ( cpumap & 1 )
+        if ( *cpumap & (1L << (i % 64)) )
         {
             PyObject* pyint = PyInt_FromLong(i);
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap >>= 1;
+        if ( (i % 64) == 63 )
+            cpumap++;
     }
 
     return cpulist;
 }
@@ -1959,54 +1957,38 @@
     return zero;
 }
 
-static PyObject *pyxc_cpupool_getinfo(XcObject *self,
-                                      PyObject *args,
-                                      PyObject *kwds)
+static PyObject *pyxc_cpupool_getinfo(XcObject *self)
 {
     PyObject *list, *info_dict;
-    uint32_t first_pool = 0;
-    int max_pools = 1024, nr_pools, i;
+    uint32_t pool;
     xc_cpupoolinfo_t *info;
 
-    static char *kwd_list[] = { "first_pool", "max_pools", NULL };
-
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
-                                      &first_pool, &max_pools) )
-        return NULL;
-
-    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
-    if (info == NULL)
-        return PyErr_NoMemory();
-
-    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
-    if (nr_pools < 0)
+    list = PyList_New(0);
+    for (pool = 0;;)
     {
-        free(info);
-        return pyxc_error_to_exception(self->xc_handle);
-    }
-
-    list = PyList_New(nr_pools);
-    for ( i = 0 ; i < nr_pools; i++ )
-    {
+        info = xc_cpupool_getinfo(self->xc_handle, pool);
+        if (info == NULL)
+            break;
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:N}",
-            "cpupool",         (int)info[i].cpupool_id,
-            "sched",           info[i].sched_id,
-            "n_dom",           info[i].n_dom,
-            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
+            "cpupool",         (int)info->cpupool_id,
+            "sched",           info->sched_id,
+            "n_dom",           info->n_dom,
+            "cpulist",         cpumap_to_cpulist(info->cpumap,
+                                                 info->cpumap_size));
+        pool = info->cpupool_id + 1;
+        free(info);
+
         if ( info_dict == NULL )
         {
             Py_DECREF(list);
-            if ( info_dict != NULL ) {
-                Py_DECREF(info_dict);
-            }
-            free(info);
             return NULL;
         }
-        PyList_SetItem(list, i, info_dict);
+
+        PyList_Append(list, info_dict);
+        Py_DECREF(info_dict);
     }
-
-    free(info);
 
     return list;
 }
@@ -2072,12 +2054,19 @@
 
 static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
 {
-    uint64_t cpumap;
+    uint64_t *cpumap;
+    int mapsize;
+    PyObject *info = NULL;
 
-    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+    cpumap = xc_cpupool_freeinfo(self->xc_handle, &mapsize);
+    if (!cpumap)
         return pyxc_error_to_exception(self->xc_handle);
 
-    return cpumap_to_cpulist(cpumap);
+    info = cpumap_to_cpulist(cpumap, mapsize * 8);
+
+    free(cpumap);
+
+    return info;
 }
 
 static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,
@@ -2832,14 +2821,9 @@
 
     { "cpupool_getinfo",
       (PyCFunction)pyxc_cpupool_getinfo,
-      METH_VARARGS | METH_KEYWORDS, "\n"
+      METH_NOARGS, "\n"
       "Get information regarding a set of cpupools, in increasing id order.\n"
-      " first_pool [int, 0]: First cpupool to retrieve info about.\n"
-      " max_pools [int, 1024]: Maximum number of cpupools to retrieve info"
-      " about.\n\n"
-      "Returns: [list of dicts] if list length is less than 'max_pools'\n"
-      "         parameter then there was an error, or the end of the\n"
-      "         cpupool-id space was reached.\n"
+      "Returns: [list of dicts]\n"
      " pool [int]:  Identifier of cpupool to which this info pertains\n"
      " sched [int]: Scheduler used for this cpupool\n"
      " n_dom [int]: Number of Domains in this cpupool\n"
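
xc_cpupool_freeinfo() follows the same ownership convention: the caller
receives a malloc()ed uint64_t bitmap plus its size in bytes and is
responsible for freeing it. A minimal sketch (illustrative only, not
part of this patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* Print all cpus that are not assigned to any cpupool. */
    static void list_free_cpus(xc_interface *xch)
    {
        uint64_t *cpumap;
        int cpusize, i;     /* cpusize: size of the map in bytes */

        cpumap = xc_cpupool_freeinfo(xch, &cpusize);
        if ( cpumap == NULL )
            return;

        /* The map is packed little-endian into 64-bit words, so bit i
         * lives in word i/64 at position i%64. */
        for ( i = 0; i < cpusize * 8; i++ )
            if ( cpumap[i / 64] & (1ULL << (i % 64)) )
                printf("cpu %d is unassigned\n", i);

        free(cpumap);
    }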