
[Xen-devel] [PATCH v9 for Xen 4.7 2/4] libxc: enable per-VCPU parameter for RTDS



Add xc_sched_rtds_vcpu_get/set functions, which interact with Xen to
get/set a domain's per-VCPU scheduling parameters.
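
For reference, a minimal caller sketch (a hypothetical helper, assuming
the vcpuid/u.rtds layout of struct xen_domctl_schedparam_vcpu introduced
earlier in this series, and microsecond units as in the existing
per-domain RTDS interface):

    /* Hypothetical example: give the first two VCPUs of a domain a
     * 10ms period and a 4ms budget each (values in microseconds). */
    static int set_rtds_params(xc_interface *xch, uint32_t domid)
    {
        struct xen_domctl_schedparam_vcpu vcpus[2];
        int i;

        for ( i = 0; i < 2; i++ )
        {
            vcpus[i].vcpuid = i;
            vcpus[i].u.rtds.period = 10000;
            vcpus[i].u.rtds.budget = 4000;
        }

        return xc_sched_rtds_vcpu_set(xch, domid, vcpus, 2);
    }

The get path is symmetric: the caller fills in each entry's vcpuid and
xc_sched_rtds_vcpu_get writes the scheduling parameters back into the
array on return.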

Signed-off-by: Chong Li <chong.li@xxxxxxxxx>
Signed-off-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
Signed-off-by: Sisu Xi <xisisu@xxxxxxxxx>

Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Reviewed-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Changes on PATCH v7:
1) rc in xc_sched_rtds_vcpu_set() is no longer initialized to 0.

Changes on PATCH v6:
1) Resolve some coding style issues

Changes on PATCH v5:
1) In xc_sched_rtds_vcpu_get/set, re-issue the hypercall
if it is preempted.


Changes on PATCH v4:
1) Minor modifications on the function parameters.


Changes on PATCH v2:
1) Minor modifications due to the change of struct xen_domctl_scheduler_op.

CC: <dario.faggioli@xxxxxxxxxx>
CC: <george.dunlap@xxxxxxxxxxxxx>
CC: <dgolomb@xxxxxxxxxxxxxx>
CC: <mengxu@xxxxxxxxxxxxx>
CC: <wei.liu2@xxxxxxxxxx>
CC: <lichong659@xxxxxxxxx>
---
 tools/libxc/include/xenctrl.h |  8 +++++
 tools/libxc/xc_rt.c           | 68 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 76 insertions(+)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index a9e4dc1..f430ef9 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -878,6 +878,14 @@ int xc_sched_rtds_domain_set(xc_interface *xch,
 int xc_sched_rtds_domain_get(xc_interface *xch,
                             uint32_t domid,
                             struct xen_domctl_sched_rtds *sdom);
+int xc_sched_rtds_vcpu_set(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus);
+int xc_sched_rtds_vcpu_get(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus);
 
 int
 xc_sched_arinc653_schedule_set(
diff --git a/tools/libxc/xc_rt.c b/tools/libxc/xc_rt.c
index d59e5ce..221d17f 100644
--- a/tools/libxc/xc_rt.c
+++ b/tools/libxc/xc_rt.c
@@ -62,3 +62,71 @@ int xc_sched_rtds_domain_get(xc_interface *xch,
 
     return rc;
 }
+
+int xc_sched_rtds_vcpu_set(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus)
+{
+    int rc;
+    unsigned processed = 0;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(vcpus, sizeof(*vcpus) * num_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, vcpus) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = (domid_t) domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putvcpuinfo;
+
+    while ( processed < num_vcpus )
+    {
+        domctl.u.scheduler_op.u.v.nr_vcpus = num_vcpus - processed;
+        set_xen_guest_handle_offset(domctl.u.scheduler_op.u.v.vcpus, vcpus,
+                                    processed);
+        if ( (rc = do_domctl(xch, &domctl)) != 0 )
+            break;
+        processed += domctl.u.scheduler_op.u.v.nr_vcpus;
+    }
+
+    xc_hypercall_bounce_post(xch, vcpus);
+
+    return rc;
+}
+
+int xc_sched_rtds_vcpu_get(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus)
+{
+    int rc;
+    unsigned processed = 0;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(vcpus, sizeof(*vcpus) * num_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( xc_hypercall_bounce_pre(xch, vcpus) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = (domid_t) domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getvcpuinfo;
+
+    while ( processed < num_vcpus )
+    {
+        domctl.u.scheduler_op.u.v.nr_vcpus = num_vcpus - processed;
+        set_xen_guest_handle_offset(domctl.u.scheduler_op.u.v.vcpus, vcpus,
+                                    processed);
+        if ( (rc = do_domctl(xch, &domctl)) != 0 )
+            break;
+        processed += domctl.u.scheduler_op.u.v.nr_vcpus;
+    }
+
+    xc_hypercall_bounce_post(xch, vcpus);
+
+    return rc;
+}
-- 
1.9.1

