
[Xen-devel] [PATCH v5 13/17] libxc: get and set soft and hard affinity



by using the flags field and the new cpumap arguments introduced
in the DOMCTL_{get,set}_vcpuaffinity hypercall interface.

Both xc_vcpu_setaffinity() and xc_vcpu_getaffinity() now take
a flags parameter, to specify whether the caller wants to
set/get hard affinity, soft affinity or both. They also take
two cpumap parameters instead of only one, so that hard and
soft affinity can be set/get at the same time (and, in the
case of a set, each one to its own value).
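
For instance, a caller that wants to pin a vcpu hard to CPUs 0-3
while expressing a soft preference for CPUs 0-1 can now do both in
a single call. A minimal sketch (purely illustrative, not part of
the patch; the helper name is made up, and bits are set by hand the
same way the existing in-tree callers do):

    #include <stdlib.h>
    #include <xenctrl.h>

    static int set_both_affinities(xc_interface *xch, uint32_t domid, int vcpu)
    {
        xc_cpumap_t hard = xc_cpumap_alloc(xch);
        xc_cpumap_t soft = xc_cpumap_alloc(xch);
        int i, rc = -1;

        if ( !hard || !soft )
            goto out;

        /* Hard affinity: CPUs 0-3; soft affinity: CPUs 0-1 */
        for ( i = 0; i < 4; i++ )
            hard[i / 8] |= 1 << (i & 7);
        for ( i = 0; i < 2; i++ )
            soft[i / 8] |= 1 << (i & 7);

        /* Both flags at once: each affinity gets its own value */
        rc = xc_vcpu_setaffinity(xch, domid, vcpu, hard, soft,
                                 XEN_VCPUAFFINITY_HARD |
                                 XEN_VCPUAFFINITY_SOFT);
     out:
        free(hard);
        free(soft);
        return rc;
    }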

In xc_vcpu_setaffinity(), the cpumaps are IN/OUT parameters,
as are the corresponding arguments of the
DOMCTL_set_vcpuaffinity hypercall. On return, Xen fills them
with the effective hard and soft affinity, i.e., what Xen will
actually use when scheduling the vcpu.
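
And since the cpumaps are IN/OUT, a caller can also compare what it
asked for with what it got back. Again just a sketch (hypothetical
helper, error handling kept minimal); note how it passes NULL for the
soft cpumap when only hard affinity is involved, like the in-tree
callers below do:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <xenctrl.h>

    static int set_hard_and_check(xc_interface *xch, uint32_t domid,
                                  int vcpu, xc_cpumap_t request)
    {
        int sz = xc_get_cpumap_size(xch);
        xc_cpumap_t wanted = xc_cpumap_alloc(xch);
        int rc = -1;

        if ( !sz || !wanted )
            goto out;
        memcpy(wanted, request, sz);

        /* Only hard affinity: NULL for the (unused) soft cpumap */
        rc = xc_vcpu_setaffinity(xch, domid, vcpu, request, NULL,
                                 XEN_VCPUAFFINITY_HARD);
        if ( rc )
            goto out;

        /* 'request' now holds the effective hard affinity */
        if ( memcmp(wanted, request, sz) )
            fprintf(stderr, "vcpu %d: effective hard affinity differs "
                    "from what was requested\n", vcpu);
     out:
        free(wanted);
        return rc;
    }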

In-tree callers are also fixed to cope with the new interface.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
Changes from v4:
 * update toward the new hypercall interface;
 * migrate to hypercall BOUNCEs instead of BUFFERs, as
   suggested during (v3) review;

Changes from v2:
 * better cleanup logic in _vcpu_setaffinity() (regarding
   xc_hypercall_buffer_{alloc,free}()), as suggested during
   review;
 * make it more evident that DOMCTL_setvcpuaffinity has an out
   parameter, by calling it ecpumap_out, and by improving the
   comment about it;
 * change the interface of xc_vcpu_[sg]etaffinity() so that
   they take the new parameters (flags and ecpumap_out), and
   fix the in-tree callers.
---
 tools/libxc/xc_domain.c             |   72 ++++++++++++++++++++++-------------
 tools/libxc/xenctrl.h               |   55 ++++++++++++++++++++++++++-
 tools/libxl/libxl.c                 |    6 ++-
 tools/ocaml/libs/xc/xenctrl_stubs.c |    8 +++-
 tools/python/xen/lowlevel/xc/xc.c   |    6 ++-
 5 files changed, 113 insertions(+), 34 deletions(-)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 8c807a6..0348f23 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -192,43 +192,53 @@ int xc_domain_node_getaffinity(xc_interface *xch,
 int xc_vcpu_setaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        xc_cpumap_t cpumap)
+                        xc_cpumap_t cpumap_hard_inout,
+                        xc_cpumap_t cpumap_soft_inout,
+                        uint32_t flags)
 {
     DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+    DECLARE_HYPERCALL_BOUNCE(cpumap_hard_inout, 0,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_HYPERCALL_BOUNCE(cpumap_soft_inout, 0,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
     int ret = -1;
     int cpusize;
 
     cpusize = xc_get_cpumap_size(xch);
-    if (!cpusize)
+    if ( !cpusize )
     {
         PERROR("Could not get number of cpus");
-        goto out;
+        return -1;
     }
 
-    local = xc_hypercall_buffer_alloc(xch, local, cpusize);
-    if ( local == NULL )
+    HYPERCALL_BOUNCE_SET_SIZE(cpumap_hard_inout, cpusize);
+    HYPERCALL_BOUNCE_SET_SIZE(cpumap_soft_inout, cpusize);
+
+    if ( xc_hypercall_bounce_pre(xch, cpumap_hard_inout) ||
+         xc_hypercall_bounce_pre(xch, cpumap_soft_inout) )
     {
-        PERROR("Could not allocate memory for setvcpuaffinity domctl hypercall");
+        PERROR("Could not allocate hcall buffers for DOMCTL_setvcpuaffinity");
         goto out;
     }
 
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
-    domctl.u.vcpuaffinity.flags = XEN_VCPUAFFINITY_HARD;
-
-    memcpy(local, cpumap, cpusize);
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_hard.bitmap, local);
+    domctl.u.vcpuaffinity.flags = flags;
 
+    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_hard.bitmap,
+                         cpumap_hard_inout);
     domctl.u.vcpuaffinity.cpumap_hard.nr_bits = cpusize * 8;
+    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_soft.bitmap,
+                         cpumap_soft_inout);
+    domctl.u.vcpuaffinity.cpumap_soft.nr_bits = cpusize * 8;
 
     ret = do_domctl(xch, &domctl);
 
-    xc_hypercall_buffer_free(xch, local);
-
  out:
+    xc_hypercall_bounce_post(xch, cpumap_hard_inout);
+    xc_hypercall_bounce_post(xch, cpumap_soft_inout);
+
     return ret;
 }
 
@@ -236,41 +246,51 @@ int xc_vcpu_setaffinity(xc_interface *xch,
 int xc_vcpu_getaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        xc_cpumap_t cpumap)
+                        xc_cpumap_t cpumap_hard,
+                        xc_cpumap_t cpumap_soft,
+                        uint32_t flags)
 {
     DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+    DECLARE_HYPERCALL_BOUNCE(cpumap_hard, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(cpumap_soft, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
     int ret = -1;
     int cpusize;
 
     cpusize = xc_get_cpumap_size(xch);
-    if (!cpusize)
+    if ( !cpusize )
     {
         PERROR("Could not get number of cpus");
-        goto out;
+        return -1;
     }
 
-    local = xc_hypercall_buffer_alloc(xch, local, cpusize);
-    if (local == NULL)
+    HYPERCALL_BOUNCE_SET_SIZE(cpumap_hard, cpusize);
+    HYPERCALL_BOUNCE_SET_SIZE(cpumap_soft, cpusize);
+
+    if ( xc_hypercall_bounce_pre(xch, cpumap_hard) ||
+         xc_hypercall_bounce_pre(xch, cpumap_soft) )
     {
-        PERROR("Could not allocate memory for getvcpuaffinity domctl hypercall");
+        PERROR("Could not allocate hcall buffers for DOMCTL_getvcpuaffinity");
         goto out;
     }
 
     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
-    domctl.u.vcpuaffinity.flags = XEN_VCPUAFFINITY_HARD;
+    domctl.u.vcpuaffinity.flags = flags;
 
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_hard.bitmap, local);
+    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_hard.bitmap,
+                         cpumap_hard);
     domctl.u.vcpuaffinity.cpumap_hard.nr_bits = cpusize * 8;
+    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_soft.bitmap,
+                         cpumap_soft);
+    domctl.u.vcpuaffinity.cpumap_soft.nr_bits = cpusize * 8;
 
     ret = do_domctl(xch, &domctl);
 
-    memcpy(cpumap, local, cpusize);
+ out:
+    xc_hypercall_bounce_post(xch, cpumap_hard);
+    xc_hypercall_bounce_post(xch, cpumap_soft);
 
-    xc_hypercall_buffer_free(xch, local);
-out:
     return ret;
 }
 
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 733ed03..7663e2f 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -582,14 +582,65 @@ int xc_domain_node_getaffinity(xc_interface *xch,
                                uint32_t domind,
                                xc_nodemap_t nodemap);
 
+/**
+ * This function specifies the CPU affinity for a vcpu.
+ *
+ * There are two kinds of affinity: soft affinity is the set of CPUs a vcpu
+ * prefers to run on, hard affinity the set of CPUs it is allowed to run on.
+ * If flags contains XEN_VCPUAFFINITY_SOFT, the soft affinity is set to
+ * what cpumap_soft_inout contains. If flags contains XEN_VCPUAFFINITY_HARD,
+ * the hard affinity is set to what cpumap_hard_inout contains. Both flags
+ * can be set at the same time, in which case both soft and hard affinity are
+ * set to what the respective parameter contains.
+ *
+ * The function also returns the effective hard or/and soft affinity, still
+ * via the cpumap_soft_inout and cpumap_hard_inout parameters. Effective
+ * affinity is, in case of soft affinity, the intersection of soft affinity,
+ * hard affinity and the cpupool's online CPUs for the domain, and is returned
+ * in cpumap_soft_inout, if XEN_VCPUAFFINITY_SOFT is set in flags. In case of
+ * hard affinity, it is the intersection between hard affinity and the
+ * cpupool's online CPUs, and is returned in cpumap_hard_inout, if
+ * XEN_VCPUAFFINITY_HARD is set in flags. If both flags are set, both soft
+ * and hard affinity are returned in the respective parameter.
+ *
+ * We report the effective affinity back because it is what the Xen scheduler
+ * will actually use, thus allowing the caller to check whether or not it
+ * matches with, or is at least good enough for, their purposes.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param cpumap_hard_inout specifies(/returns) the (effective) hard affinity
+ * @param cpumap_soft_inout specifies(/returns) the (effective) soft affinity
+ * @param flags what we want to set
+ */
 int xc_vcpu_setaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        xc_cpumap_t cpumap);
+                        xc_cpumap_t cpumap_hard_inout,
+                        xc_cpumap_t cpumap_soft_inout,
+                        uint32_t flags);
+
+/**
+ * This function retrieves hard and soft CPU affinity of a vcpu,
+ * depending on what flags are set.
+ *
+ * Soft affinity is returned in cpumap_soft if XEN_VCPUAFFINITY_SOFT is set.
+ * Hard affinity is returned in cpumap_hard if XEN_VCPUAFFINITY_HARD is set.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param cpumap_hard is where hard affinity is returned
+ * @param cpumap_soft is where soft affinity is returned
+ * @param flags what we want to get
+ */
 int xc_vcpu_getaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        xc_cpumap_t cpumap);
+                        xc_cpumap_t cpumap_hard,
+                        xc_cpumap_t cpumap_soft,
+                        uint32_t flags);
 
 
 /**
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index d3180dc..e55bd68 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -4546,7 +4546,8 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
             LOGE(ERROR, "getting vcpu info");
             goto err;
         }
-        if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu, ptr->cpumap.map) == -1) {
+        if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu, ptr->cpumap.map,
+                                NULL, XEN_VCPUAFFINITY_HARD) == -1) {
             LOGE(ERROR, "getting vcpu affinity");
             goto err;
         }
@@ -4570,7 +4571,8 @@ err:
 int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
                            libxl_bitmap *cpumap)
 {
-    if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map)) {
+    if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map, NULL,
+                            XEN_VCPUAFFINITY_HARD)) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu affinity");
         return ERROR_FAIL;
     }
diff --git a/tools/ocaml/libs/xc/xenctrl_stubs.c b/tools/ocaml/libs/xc/xenctrl_stubs.c
index f5cf0ed..4d22b82 100644
--- a/tools/ocaml/libs/xc/xenctrl_stubs.c
+++ b/tools/ocaml/libs/xc/xenctrl_stubs.c
@@ -438,7 +438,9 @@ CAMLprim value stub_xc_vcpu_setaffinity(value xch, value domid,
                        c_cpumap[i/8] |= 1 << (i&7);
        }
        retval = xc_vcpu_setaffinity(_H(xch), _D(domid),
-                                    Int_val(vcpu), c_cpumap);
+                                    Int_val(vcpu),
+                                    c_cpumap, NULL,
+                                    XEN_VCPUAFFINITY_HARD);
        free(c_cpumap);
 
        if (retval < 0)
@@ -460,7 +462,9 @@ CAMLprim value stub_xc_vcpu_getaffinity(value xch, value domid,
                failwith_xc(_H(xch));
 
        retval = xc_vcpu_getaffinity(_H(xch), _D(domid),
-                                    Int_val(vcpu), c_cpumap);
+                                    Int_val(vcpu),
+                                    c_cpumap, NULL,
+                                    XEN_VCPUAFFINITY_HARD);
        if (retval < 0) {
                free(c_cpumap);
                failwith_xc(_H(xch));
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 737bdac..ae2522b 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -256,7 +256,8 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
         }
     }
   
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
+    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap,
+                             NULL, XEN_VCPUAFFINITY_HARD) != 0 )
     {
         free(cpumap);
         return pyxc_error_to_exception(self->xc_handle);
@@ -403,7 +404,8 @@ static PyObject *pyxc_vcpu_getinfo(XcObject *self,
     if(cpumap == NULL)
         return pyxc_error_to_exception(self->xc_handle);
 
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap);
+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap,
+                             NULL, XEN_VCPUAFFINITY_HARD);
     if ( rc < 0 )
     {
         free(cpumap);

