Re: [Xen-devel] [PATCH RESEND 09/12] libxc: numa-sched: enable getting/specifying per-vcpu node-affinity
On Tue, Nov 05, 2013 at 03:35:42PM +0100, Dario Faggioli wrote:
> by providing the proper get/set interfaces and wiring them
> to the new domctl-s from the previous commit.

s/previous commit/<title of the commit>/

> 
> Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
> ---
>  tools/libxc/xc_domain.c |   82 +++++++++++++++++++++++++++++++++++++++++++++++
>  tools/libxc/xenctrl.h   |   19 +++++++++++
>  2 files changed, 101 insertions(+)
> 
> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
> index f8825a3..e44ad2e 100644
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -272,6 +272,88 @@ out:
>      return ret;
>  }
> 
> +int xc_vcpu_setnodeaffinity(xc_interface *xch,
> +                            uint32_t domid,
> +                            int vcpu,
> +                            xc_nodemap_t nodemap)
> +{
> +    DECLARE_DOMCTL;
> +    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
> +    int ret = -1;

Ewww.. Could we just use regular -Exx for new xc_* calls?

> +    int nodesize;
> +
> +    nodesize = xc_get_cpumap_size(xch);
> +    if (!nodesize)
> +    {
> +        PERROR("Could not get number of nodes");

Extra space.

> +        goto out;
> +    }
> +
> +    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
> +    if ( local == NULL )
> +    {
> +        PERROR("Could not allocate memory for setvcpunodeaffinity domctl hypercall");
> +        goto out;
> +    }
> +
> +    domctl.cmd = XEN_DOMCTL_setvcpunodeaffinity;
> +    domctl.domain = (domid_t)domid;
> +    domctl.u.vcpuaffinity.vcpu = vcpu;
> +
> +    memcpy(local, nodemap, nodesize);
> +
> +    set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
> +
> +    domctl.u.vcpuaffinity.map.nr_bits = nodesize * 8;
> +
> +    ret = do_domctl(xch, &domctl);
> +
> +    xc_hypercall_buffer_free(xch, local);
> +
> + out:
> +    return ret;
> +}
> +
> +int xc_vcpu_getnodeaffinity(xc_interface *xch,
> +                            uint32_t domid,
> +                            int vcpu,
> +                            xc_nodemap_t nodemap)
> +{
> +    DECLARE_DOMCTL;
> +    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
> +    int ret = -1;
> +    int nodesize;
> +
> +    nodesize = xc_get_nodemap_size(xch);
> +    if (!nodesize)
> +    {
> +        PERROR("Could not get number of nodes");
> +        goto out;
> +    }
> +
> +    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
> +    if (local == NULL)
> +    {
> +        PERROR("Could not allocate memory for getvcpunodeaffinity domctl hypercall");
> +        goto out;
> +    }
> +
> +    domctl.cmd = XEN_DOMCTL_getvcpunodeaffinity;
> +    domctl.domain = (domid_t)domid;
> +    domctl.u.vcpuaffinity.vcpu = vcpu;
> +
> +    set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
> +    domctl.u.vcpuaffinity.map.nr_bits = nodesize * 8;

Could the 8 be replaced by a sizeof?

> +
> +    ret = do_domctl(xch, &domctl);
> +
> +    memcpy(nodemap, local, nodesize);
> +
> +    xc_hypercall_buffer_free(xch, local);
> +out:
> +    return ret;
> +}
> +
>  int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
>                                unsigned int *guest_width)
>  {
> diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
> index 8cf3f3b..208fa2c 100644
> --- a/tools/libxc/xenctrl.h
> +++ b/tools/libxc/xenctrl.h
> @@ -551,6 +551,25 @@ int xc_domain_node_getaffinity(xc_interface *xch,
>                                 uint32_t domind,
>                                 xc_nodemap_t nodemap);
> 
> +/**
> + * These functions set and retrieves the NUMA node-affinity
> + * of a specific vcpu.
> + *
> + * @parm xch a handle to an open hypervisor interface.
> + * @parm domid the domain id one is interested in.
> + * @parm vcpu the vcpu one wants to set/get the affinity of.
> + * @parm nodemap the map of the affine nodes.
> + * @return 0 on success, -1 on failure.

and something in errno?
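FWIW, a (hypothetical, untested) caller sketch of what I mean by documenting
the errno side of things; it only assumes the xc_vcpu_setnodeaffinity() added
in this patch plus the existing xc_interface_open()/xc_nodemap_alloc()
helpers, and the domain/vcpu/node numbers are made up:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    xc_nodemap_t nodemap;
    int rc = 1;

    if ( !xch )
        return 1;

    nodemap = xc_nodemap_alloc(xch);   /* zeroed bitmap, one bit per node */
    if ( !nodemap )
        goto out;

    nodemap[0] |= 1;                   /* bit 0 == NUMA node 0 */

    if ( xc_vcpu_setnodeaffinity(xch, 1 /* domid */, 0 /* vcpu */, nodemap) < 0 )
    {
        /* This is the errno value the @return comment should describe. */
        fprintf(stderr, "setnodeaffinity: %s\n", strerror(errno));
        goto out_free;
    }
    rc = 0;

 out_free:
    free(nodemap);
 out:
    xc_interface_close(xch);
    return rc;
}

(That also ties in with the -Exx question above: AFAICT the neighbouring
xc_vcpu_setaffinity() just propagates do_domctl()'s -1 + errno, so whichever
convention these new calls end up using is worth spelling out in this comment.)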
> + */
> +int xc_vcpu_setnodeaffinity(xc_interface *xch,
> +                            uint32_t domid,
> +                            int vcpu,
> +                            xc_nodemap_t nodemap);
> +int xc_vcpu_getnodeaffinity(xc_interface *xch,
> +                            uint32_t domid,
> +                            int vcpu,
> +                            xc_nodemap_t nodemap);
> +
>  int xc_vcpu_setaffinity(xc_interface *xch,
>                          uint32_t domid,
>                          int vcpu,

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel