[Xen-devel] [Patch 2/6] Cpupools: libxc part
-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions           e-mail: juergen.gross@xxxxxxxxxxxxxx
Domagkstr. 28                          Internet: ts.fujitsu.com
D-80807 Muenchen                Company details: ts.fujitsu.com/imprint.html

Signed-off-by: juergen.gross@xxxxxxxxxxxxxx

diff -r dbf0fd95180f tools/libxc/Makefile
--- a/tools/libxc/Makefile      Tue Apr 20 14:32:53 2010 +0100
+++ b/tools/libxc/Makefile      Wed Apr 21 13:08:38 2010 +0200
@@ -8,6 +8,7 @@ CTRL_SRCS-y += xc_core.c
 CTRL_SRCS-y       += xc_core.c
 CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
 CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
+CTRL_SRCS-y       += xc_cpupool.c
 CTRL_SRCS-y       += xc_domain.c
 CTRL_SRCS-y       += xc_evtchn.c
 CTRL_SRCS-y       += xc_misc.c
diff -r dbf0fd95180f tools/libxc/xc_cpupool.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpupool.c  Wed Apr 21 13:08:38 2010 +0200
@@ -0,0 +1,154 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include "xc_private.h"
+
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id)
+{
+    int err;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
+    domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+        XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
+    domctl.u.cpupool_op.sched_id = sched_id;
+    if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+        return err;
+
+    *ppoolid = domctl.u.cpupool_op.cpupool_id;
+    return 0;
+}
+
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info)
+{
+    int err = 0;
+    int p;
+    uint32_t poolid = first_poolid;
+    uint8_t local[sizeof (info->cpumap)];
+    DECLARE_DOMCTL;
+
+    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+    for (p = 0; p < n_max; p++)
+    {
+        domctl.cmd = XEN_DOMCTL_cpupool_op;
+        domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
+        domctl.u.cpupool_op.cpupool_id = poolid;
+        set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+        domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+        if ( (err = lock_pages(local, sizeof(local))) != 0 )
+        {
+            PERROR("Could not lock memory for Xen hypercall");
+            break;
+        }
+        err = do_domctl_save(xc_handle, &domctl);
+        unlock_pages(local, sizeof (local));
+
+        if ( err < 0 )
+            break;
+
+        info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
+        info->sched_id = domctl.u.cpupool_op.sched_id;
+        info->n_dom = domctl.u.cpupool_op.n_dom;
+        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+        poolid = domctl.u.cpupool_op.cpupool_id + 1;
+        info++;
+    }
+
+    if ( p == 0 )
+        return err;
+
+    return p;
+}
+
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.domid = domid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap)
+{
+    int err;
+    uint8_t local[sizeof (*cpumap)];
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
+    set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+    domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        return err;
+    }
+
+    err = do_domctl_save(xc_handle, &domctl);
+    unlock_pages(local, sizeof (local));
+
+    if (err < 0)
+        return err;
+
+    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+
+    return 0;
+}
diff -r dbf0fd95180f tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Tue Apr 20 14:32:53 2010 +0100
+++ b/tools/libxc/xc_domain.c   Wed Apr 21 13:08:38 2010 +0200
@@ -220,6 +220,7 @@ int xc_domain_getinfo(int xc_handle,
         info->cpu_time = domctl.u.getdomaininfo.cpu_time;
         info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
         info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
+        info->cpupool = domctl.u.getdomaininfo.cpupool;
 
         memcpy(info->handle, domctl.u.getdomaininfo.handle,
                sizeof(xen_domain_handle_t));
diff -r dbf0fd95180f tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Tue Apr 20 14:32:53 2010 +0100
+++ b/tools/libxc/xc_private.h  Wed Apr 21 13:08:38 2010 +0200
@@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
     return ret;
 }
 
+static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
+{
+    int ret;
+
+    do
+    {
+        ret = do_domctl(xc_handle, domctl);
+    }
+    while ( (ret < 0 ) && (errno == EAGAIN) );
+
+    return ret;
+}
+
 static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
 {
     int ret = -1;
diff -r dbf0fd95180f tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Tue Apr 20 14:32:53 2010 +0100
+++ b/tools/libxc/xenctrl.h     Wed Apr 21 13:08:38 2010 +0200
@@ -171,6 +171,7 @@ typedef struct xc_dominfo {
     unsigned int  nr_online_vcpus;
    unsigned int  max_vcpu_id;
     xen_domain_handle_t handle;
+    unsigned int  cpupool;
 } xc_dominfo_t;
 
 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
@@ -508,6 +509,100 @@ int xc_domain_setdebugging(int xc_handle
 int xc_domain_setdebugging(int xc_handle,
                            uint32_t domid,
                            unsigned int enable);
+
+/*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+    uint32_t cpupool_id;
+    uint32_t sched_id;
+    uint32_t n_dom;
+    uint64_t cpumap;
+} xc_cpupoolinfo_t;
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. Pool must be unused and have no cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm first_poolid lowest id for which info is returned
+ * @parm n_max maximum number of cpupools to return info
+ * @parm info pointer to xc_cpupoolinfo_t array
+ * return number of cpupool infos
+ */
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info);
+
+/**
+ * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu);
+
+/**
+ * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm cpumap pointer where to store the cpumap
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap);
+
 /*
  * EVENT CHANNEL FUNCTIONS
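
For reviewers who want to see the new interface in context, here is a minimal
caller sketch. It is not part of the patch: it assumes the pre-4.1 libxc
convention where xc_interface_open() returns a plain int handle, assumes
XEN_SCHEDULER_CREDIT from the public domctl header as the scheduler id, and
uses a made-up domain id; error handling is abbreviated.

/* Sketch only: create a pool, give it one free cpu, move a domain into it. */
#include <stdio.h>
#include "xenctrl.h"

int main(void)
{
    int xc = xc_interface_open();        /* pre-4.1 style: int handle */
    uint32_t poolid = 0;                 /* 0 => let Xen pick the pool id */
    uint32_t domid = 1;                  /* example domain, assumed to exist */

    if ( xc < 0 )
        return 1;

    /* XEN_SCHEDULER_CREDIT used purely for illustration. */
    if ( xc_cpupool_create(xc, &poolid, XEN_SCHEDULER_CREDIT) )
        goto out;

    /* cpu == -1 asks for the first currently unassigned cpu. */
    if ( xc_cpupool_addcpu(xc, poolid, -1) )
        goto out;

    if ( xc_cpupool_movedomain(xc, poolid, domid) )
        goto out;

    printf("domain %u now runs in cpupool %u\n", domid, poolid);

 out:
    xc_interface_close(xc);
    return 0;
}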
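Similarly, a sketch (again not part of the patch) of how a tool might consume
xc_cpupool_getinfo() and xc_cpupool_freeinfo(), decoding the uint64_t cpumap
bit by bit; the array size of 16 pools is an arbitrary choice for
illustration.

/* Sketch only: list pools and the physical cpus each one owns. */
#include <stdio.h>
#include "xenctrl.h"

static void print_pools(int xc)
{
    xc_cpupoolinfo_t info[16];
    uint64_t free_map;
    int n, i, cpu;

    /* Returns the number of pools filled in, starting from pool id 0. */
    n = xc_cpupool_getinfo(xc, 0, 16, info);
    for ( i = 0; i < n; i++ )
    {
        printf("pool %u (sched %u, %u domains):",
               info[i].cpupool_id, info[i].sched_id, info[i].n_dom);
        for ( cpu = 0; cpu < 64; cpu++ )
            if ( info[i].cpumap & (1ULL << cpu) )
                printf(" %d", cpu);
        printf("\n");
    }

    if ( !xc_cpupool_freeinfo(xc, &free_map) )
        printf("free cpu map: %#llx\n", (unsigned long long)free_map);
}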