[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v3 5/7] libxl/libxc: Move libxl_get_cpu_topology()'s hypercall buffer management to libxc
On 09/02/15 20:04, Boris Ostrovsky wrote: > Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> > --- > tools/libxc/include/xenctrl.h | 4 +- > tools/libxc/xc_misc.c | 23 +++++++++++----- > tools/libxl/libxl.c | 32 +++++------------------ > tools/misc/xenpm.c | 51 > ++++++++++++++++--------------------- > tools/python/xen/lowlevel/xc/xc.c | 23 ++++++----------- > 5 files changed, 55 insertions(+), 78 deletions(-) > > diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h > index 0cb6743..d94571d 100644 > --- a/tools/libxc/include/xenctrl.h > +++ b/tools/libxc/include/xenctrl.h > @@ -1226,7 +1226,7 @@ int xc_readconsolering(xc_interface *xch, > int xc_send_debug_keys(xc_interface *xch, char *keys); > > typedef xen_sysctl_physinfo_t xc_physinfo_t; > -typedef xen_sysctl_cputopoinfo_t xc_cputopoinfo_t; > +typedef xen_sysctl_cputopo_t xc_cputopo_t; The sysctl structure is still named xen_sysctl_cputopoinfo_t. Where does xen_sysctl_cputopo_t come from? > typedef xen_sysctl_numainfo_t xc_numainfo_t; > > typedef uint32_t xc_cpu_to_node_t; > @@ -1237,7 +1237,7 @@ typedef uint64_t xc_node_to_memfree_t; > typedef uint32_t xc_node_to_node_dist_t; > > int xc_physinfo(xc_interface *xch, xc_physinfo_t *info); > -int xc_cputopoinfo(xc_interface *xch, xc_cputopoinfo_t *info); > +int xc_cputopoinfo(xc_interface *xch, int *max_cpus, xc_cputopo_t *cputopo); max_cpus is the length of the cputopo array, and is therefore an unsigned value. 
~Andrew > int xc_numainfo(xc_interface *xch, xc_numainfo_t *info); > > int xc_sched_id(xc_interface *xch, > diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c > index be68291..4c654f3 100644 > --- a/tools/libxc/xc_misc.c > +++ b/tools/libxc/xc_misc.c > @@ -177,22 +177,31 @@ int xc_physinfo(xc_interface *xch, > return 0; > } > > -int xc_cputopoinfo(xc_interface *xch, > - xc_cputopoinfo_t *put_info) > +int xc_cputopoinfo(xc_interface *xch, int *max_cpus, > + xc_cputopo_t *cputopo) > { > int ret; > DECLARE_SYSCTL; > + DECLARE_HYPERCALL_BOUNCE(cputopo, *max_cpus * sizeof(*cputopo), > + XC_HYPERCALL_BUFFER_BOUNCE_OUT); > > - sysctl.cmd = XEN_SYSCTL_cputopoinfo; > + if ((ret = xc_hypercall_bounce_pre(xch, cputopo))) > + goto out; > > - memcpy(&sysctl.u.cputopoinfo, put_info, sizeof(*put_info)); > + sysctl.u.cputopoinfo.max_cpu_index = *max_cpus; > + set_xen_guest_handle(sysctl.u.cputopoinfo.cputopo, cputopo); > + > + sysctl.cmd = XEN_SYSCTL_cputopoinfo; > > if ( (ret = do_sysctl(xch, &sysctl)) != 0 ) > - return ret; > + goto out; > > - memcpy(put_info, &sysctl.u.cputopoinfo, sizeof(*put_info)); > + *max_cpus = sysctl.u.cputopoinfo.max_cpu_index + 1; > > - return 0; > +out: > + xc_hypercall_bounce_post(xch, cputopo); > + > + return ret; > } > > int xc_numainfo(xc_interface *xch, > diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c > index b05c0bb..f8d64c2 100644 > --- a/tools/libxl/libxl.c > +++ b/tools/libxl/libxl.c > @@ -5072,38 +5072,23 @@ int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo > *physinfo) > libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out) > { > GC_INIT(ctx); > - xc_cputopoinfo_t tinfo; > - DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo); > + xc_cputopo_t *cputopo; > libxl_cputopology *ret = NULL; > - int i; > - int max_cpus; > + int i, max_cpus; > > max_cpus = libxl_get_max_cpus(ctx); > - if (max_cpus < 0) > - { > + if (max_cpus < 0) { > LIBXL__LOG(ctx, XTL_ERROR, "Unable to determine number of CPUS"); > - 
ret = NULL; > goto out; > } > > - cputopo = xc_hypercall_buffer_alloc(ctx->xch, cputopo, > - sizeof(*cputopo) * max_cpus); > - if (cputopo == NULL) { > - LIBXL__LOG_ERRNOVAL(ctx, XTL_ERROR, ENOMEM, > - "Unable to allocate hypercall arguments"); > - goto fail; > - } > + cputopo = libxl__zalloc(gc, sizeof(*cputopo) * max_cpus); > > - set_xen_guest_handle(tinfo.cputopo, cputopo); > - tinfo.max_cpu_index = max_cpus - 1; > - if (xc_cputopoinfo(ctx->xch, &tinfo) != 0) { > + if (xc_cputopoinfo(ctx->xch, &max_cpus, cputopo) != 0) { > LIBXL__LOG_ERRNO(ctx, XTL_ERROR, "CPU topology info hypercall > failed"); > - goto fail; > + goto out; > } > > - if (tinfo.max_cpu_index < max_cpus - 1) > - max_cpus = tinfo.max_cpu_index + 1; > - > ret = libxl__zalloc(NOGC, sizeof(libxl_cputopology) * max_cpus); > > for (i = 0; i < max_cpus; i++) { > @@ -5115,11 +5100,8 @@ libxl_cputopology *libxl_get_cpu_topology(libxl_ctx > *ctx, int *nb_cpu_out) > #undef V > } > > -fail: > - xc_hypercall_buffer_free(ctx->xch, cputopo); > + *nb_cpu_out = max_cpus; > > - if (ret) > - *nb_cpu_out = max_cpus; > out: > GC_FREE; > return ret; > diff --git a/tools/misc/xenpm.c b/tools/misc/xenpm.c > index f7fe620..1d1eb40 100644 > --- a/tools/misc/xenpm.c > +++ b/tools/misc/xenpm.c > @@ -355,12 +355,11 @@ static void signal_int_handler(int signo) > int i, j, k; > struct timeval tv; > int cx_cap = 0, px_cap = 0; > - DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo); > - xc_cputopoinfo_t info = { 0 }; > - > - cputopo = xc_hypercall_buffer_alloc(xc_handle, cputopo, > - sizeof(*cputopo) * MAX_NR_CPU); > + xc_cputopo_t *cputopo; > + int max_cpus; > > + max_cpus = MAX_NR_CPU; > + cputopo = calloc(max_cpus, sizeof(*cputopo)); > if ( cputopo == NULL ) > { > fprintf(stderr, "failed to allocate hypercall buffers\n"); > @@ -445,29 +444,26 @@ static void signal_int_handler(int signo) > printf(" Avg freq\t%d\tKHz\n", avgfreq[i]); > } > > - set_xen_guest_handle(info.cputopo, cputopo); > - info.max_cpu_index = MAX_NR_CPU 
- 1; > - > - if ( cx_cap && !xc_cputopoinfo(xc_handle, &info) ) > + if ( cx_cap && !xc_cputopoinfo(xc_handle, &max_cpus, cputopo) ) > { > uint32_t socket_ids[MAX_NR_CPU]; > uint32_t core_ids[MAX_NR_CPU]; > uint32_t socket_nr = 0; > uint32_t core_nr = 0; > > - if ( info.max_cpu_index > MAX_NR_CPU - 1 ) > - info.max_cpu_index = MAX_NR_CPU - 1; > + if ( max_cpus > MAX_NR_CPU ) > + max_cpus = MAX_NR_CPU; > /* check validity */ > - for ( i = 0; i <= info.max_cpu_index; i++ ) > + for ( i = 0; i < max_cpus; i++ ) > { > if ( cputopo[i].core == INVALID_CORE_ID || > cputopo[i].socket == INVALID_SOCKET_ID ) > break; > } > - if ( i > info.max_cpu_index ) > + if ( i >= max_cpus ) > { > /* find socket nr & core nr per socket */ > - for ( i = 0; i <= info.max_cpu_index; i++ ) > + for ( i = 0; i < max_cpus; i++ ) > { > for ( j = 0; j < socket_nr; j++ ) > if ( cputopo[i].socket == socket_ids[j] ) > @@ -494,7 +490,7 @@ static void signal_int_handler(int signo) > unsigned int n; > uint64_t res; > > - for ( j = 0; j <= info.max_cpu_index; j++ ) > + for ( j = 0; j < max_cpus; j++ ) > { > if ( cputopo[j].socket == socket_ids[i] ) > break; > @@ -513,7 +509,7 @@ static void signal_int_handler(int signo) > } > for ( k = 0; k < core_nr; k++ ) > { > - for ( j = 0; j <= info.max_cpu_index; j++ ) > + for ( j = 0; j < max_cpus; j++ ) > { > if ( cputopo[j].socket == socket_ids[i] && > cputopo[j].core == core_ids[k] ) > @@ -551,7 +547,7 @@ static void signal_int_handler(int signo) > free(sum); > free(avgfreq); > out: > - xc_hypercall_buffer_free(xc_handle, cputopo); > + free(cputopo); > xc_interface_close(xc_handle); > exit(0); > } > @@ -958,22 +954,19 @@ void scaling_governor_func(int argc, char *argv[]) > > void cpu_topology_func(int argc, char *argv[]) > { > - DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo); > - xc_cputopoinfo_t info = { 0 }; > + xc_cputopo_t *cputopo; > + int max_cpus; > int i, rc = ENOMEM; > > - cputopo = xc_hypercall_buffer_alloc(xc_handle, cputopo, > - 
sizeof(*cputopo) * MAX_NR_CPU); > + max_cpus = MAX_NR_CPU; > + cputopo = calloc(max_cpus, sizeof(*cputopo)); > if ( cputopo == NULL ) > { > fprintf(stderr, "failed to allocate hypercall buffers\n"); > goto out; > } > > - set_xen_guest_handle(info.cputopo, cputopo); > - info.max_cpu_index = MAX_NR_CPU-1; > - > - if ( xc_cputopoinfo(xc_handle, &info) ) > + if ( xc_cputopoinfo(xc_handle, &max_cpus, cputopo) ) > { > rc = errno; > fprintf(stderr, "Cannot get Xen CPU topology (%d - %s)\n", > @@ -981,11 +974,11 @@ void cpu_topology_func(int argc, char *argv[]) > goto out; > } > > - if ( info.max_cpu_index > (MAX_NR_CPU-1) ) > - info.max_cpu_index = MAX_NR_CPU-1; > + if ( max_cpus > MAX_NR_CPU ) > + max_cpus = MAX_NR_CPU; > > printf("CPU\tcore\tsocket\tnode\n"); > - for ( i = 0; i <= info.max_cpu_index; i++ ) > + for ( i = 0; i < max_cpus; i++ ) > { > if ( cputopo[i].core == INVALID_CORE_ID ) > continue; > @@ -994,7 +987,7 @@ void cpu_topology_func(int argc, char *argv[]) > } > rc = 0; > out: > - xc_hypercall_buffer_free(xc_handle, cputopo); > + free(cputopo); > if ( rc ) > exit(rc); > } > diff --git a/tools/python/xen/lowlevel/xc/xc.c > b/tools/python/xen/lowlevel/xc/xc.c > index 4275968..6e49dc5 100644 > --- a/tools/python/xen/lowlevel/xc/xc.c > +++ b/tools/python/xen/lowlevel/xc/xc.c > @@ -1221,31 +1221,24 @@ static PyObject *pyxc_getcpuinfo(XcObject *self, > PyObject *args, PyObject *kwds) > static PyObject *pyxc_topologyinfo(XcObject *self) > { > #define MAX_CPU_INDEX 255 > - xc_cputopoinfo_t tinfo = { 0 }; > - int i, max_cpu_index; > + xc_cputopo_t *cputopo; > + int i, max_cpus; > PyObject *ret_obj = NULL; > PyObject *cpu_to_core_obj, *cpu_to_socket_obj, *cpu_to_node_obj; > > - DECLARE_HYPERCALL_BUFFER(xen_sysctl_cputopo_t, cputopo); > - > - cputopo = xc_hypercall_buffer_alloc(self->xc_handle, cputopo, > sizeof(*cputopo) * (MAX_CPU_INDEX+1)); > + max_cpus = MAX_CPU_INDEX + 1; > + cputopo = calloc(max_cpus, sizeof(*cputopo)); > if ( cputopo == NULL ) > goto out; > - 
set_xen_guest_handle(tinfo.cputopo, cputopo); > - tinfo.max_cpu_index = MAX_CPU_INDEX; > > - if ( xc_cputopoinfo(self->xc_handle, &tinfo) != 0 ) > + if ( xc_cputopoinfo(self->xc_handle, &max_cpus, cputopo) != 0 ) > goto out; > > - max_cpu_index = tinfo.max_cpu_index; > - if ( max_cpu_index > MAX_CPU_INDEX ) > - max_cpu_index = MAX_CPU_INDEX; > - > /* Construct cpu-to-* lists. */ > cpu_to_core_obj = PyList_New(0); > cpu_to_socket_obj = PyList_New(0); > cpu_to_node_obj = PyList_New(0); > - for ( i = 0; i <= max_cpu_index; i++ ) > + for ( i = 0; i < max_cpus; i++ ) > { > if ( cputopo[i].core == INVALID_CORE_ID ) > { > @@ -1281,7 +1274,7 @@ static PyObject *pyxc_topologyinfo(XcObject *self) > } > } > > - ret_obj = Py_BuildValue("{s:i}", "max_cpu_index", max_cpu_index); > + ret_obj = Py_BuildValue("{s:i}", "max_cpu_index", max_cpus - 1); > > PyDict_SetItemString(ret_obj, "cpu_to_core", cpu_to_core_obj); > Py_DECREF(cpu_to_core_obj); > @@ -1293,7 +1286,7 @@ static PyObject *pyxc_topologyinfo(XcObject *self) > Py_DECREF(cpu_to_node_obj); > > out: > - xc_hypercall_buffer_free(self->xc_handle, cputopo); > + free(cputopo); > return ret_obj ? ret_obj : pyxc_error_to_exception(self->xc_handle); > #undef MAX_CPU_INDEX > } _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.