[Xen-devel] [PATCH v3 for Xen 4.6 1/4] xen: enable per-VCPU parameter settings for RTDS scheduler
Add two domctl hypercall sub-operations (XEN_DOMCTL_SCHEDOP_getvcpuinfo/putvcpuinfo) to get/set a domain's per-VCPU parameters.
Changes since PATCH v2:
1) Changed struct xen_domctl_scheduler_op so that per-VCPU parameters can be
transferred between libxc and the hypervisor (see the usage sketch below).
2) The handler of XEN_DOMCTL_SCHEDOP_getinfo now just returns the default budget
and period values of the RTDS scheduler.
3) The handler of XEN_DOMCTL_SCHEDOP_getvcpuinfo can now return an arbitrary
subset of the parameters of the VCPUs of a specific domain.
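For reference, here is a sketch of how a toolstack could drive the new
interface from the libxc side. This is illustrative only: the wrapper name
xc_rtds_vcpu_get is made up, and the sketch assumes libxc's usual
hypercall-buffer helpers (DECLARE_DOMCTL, DECLARE_HYPERCALL_BOUNCE,
xc_hypercall_bounce_pre/post, do_domctl) from xc_private.h; only the
xen_domctl_scheduler_op fields come from this patch.

/*
 * Hypothetical libxc wrapper for XEN_DOMCTL_SCHEDOP_getvcpuinfo.
 * The caller fills in vcpus[i].vcpuid for each entry of interest;
 * the hypervisor writes each VCPU's RTDS parameters back in place.
 */
int xc_rtds_vcpu_get(xc_interface *xch, uint32_t domid,
                     xen_domctl_schedparam_vcpu_t *vcpus, uint16_t nr_vcpus)
{
    int rc;
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BOUNCE(vcpus, sizeof(*vcpus) * nr_vcpus,
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    /* Bounce the caller's array into a hypercall-safe buffer. */
    if ( xc_hypercall_bounce_pre(xch, vcpus) )
        return -1;

    domctl.cmd = XEN_DOMCTL_scheduler_op;
    domctl.domain = (domid_t)domid;
    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getvcpuinfo;
    domctl.u.scheduler_op.u.v.nr_vcpus = nr_vcpus;
    set_xen_guest_handle(domctl.u.scheduler_op.u.v.vcpus, vcpus);

    rc = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, vcpus);
    return rc;
}

Setting parameters works the same way with XEN_DOMCTL_SCHEDOP_putvcpuinfo,
except the caller fills in s.rtds.period and s.rtds.budget (in microseconds)
before issuing the call.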
Signed-off-by: Chong Li <chong.li@xxxxxxxxx>
Signed-off-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
Signed-off-by: Sisu Xi <xisisu@xxxxxxxxx>
---
CC: <dario.faggioli@xxxxxxxxxx>
CC: <george.dunlap@xxxxxxxxxxxxx>
CC: <dgolomb@xxxxxxxxxxxxxx>
CC: <mengxu@xxxxxxxxxxxxx>
CC: <jbeulich@xxxxxxxx>
CC: <lichong659@xxxxxxxxx>
---
xen/common/Makefile | 1 -
xen/common/domctl.c | 3 ++
xen/common/sched_credit.c | 14 ++++----
xen/common/sched_credit2.c | 6 ++--
xen/common/sched_rt.c | 80 +++++++++++++++++++++++++++++++++++++++++----
xen/common/schedule.c | 5 +--
xen/include/public/domctl.h | 64 ++++++++++++++++++++++++++----------
7 files changed, 136 insertions(+), 37 deletions(-)
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 1cddebc..3fdf931 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -31,7 +31,6 @@ obj-y += rbtree.o
obj-y += rcupdate.o
obj-y += sched_credit.o
obj-y += sched_credit2.o
-obj-y += sched_sedf.o
obj-y += sched_arinc653.o
obj-y += sched_rt.o
obj-y += schedule.o
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 2a2d203..349f68b 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -839,6 +839,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
case XEN_DOMCTL_scheduler_op:
ret = sched_adjust(d, &op->u.scheduler_op);
+ if ( ret == -ERESTART )
+ ret = hypercall_create_continuation(
+ __HYPERVISOR_domctl, "h", u_domctl);
copyback = 1;
break;
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 953ecb0..43b086b 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1039,25 +1039,25 @@ csched_dom_cntl(
if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
{
- op->u.credit.weight = sdom->weight;
- op->u.credit.cap = sdom->cap;
+ op->u.d.credit.weight = sdom->weight;
+ op->u.d.credit.cap = sdom->cap;
}
else
{
ASSERT(op->cmd == XEN_DOMCTL_SCHEDOP_putinfo);
- if ( op->u.credit.weight != 0 )
+ if ( op->u.d.credit.weight != 0 )
{
if ( !list_empty(&sdom->active_sdom_elem) )
{
prv->weight -= sdom->weight * sdom->active_vcpu_count;
- prv->weight += op->u.credit.weight * sdom->active_vcpu_count;
+ prv->weight += op->u.d.credit.weight * sdom->active_vcpu_count;
}
- sdom->weight = op->u.credit.weight;
+ sdom->weight = op->u.d.credit.weight;
}
- if ( op->u.credit.cap != (uint16_t)~0U )
- sdom->cap = op->u.credit.cap;
+ if ( op->u.d.credit.cap != (uint16_t)~0U )
+ sdom->cap = op->u.d.credit.cap;
}
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 75e0321..8992423 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1438,20 +1438,20 @@ csched2_dom_cntl(
if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
{
- op->u.credit2.weight = sdom->weight;
+ op->u.d.credit2.weight = sdom->weight;
}
else
{
ASSERT(op->cmd == XEN_DOMCTL_SCHEDOP_putinfo);
- if ( op->u.credit2.weight != 0 )
+ if ( op->u.d.credit2.weight != 0 )
{
struct list_head *iter;
int old_weight;
old_weight = sdom->weight;
- sdom->weight = op->u.credit2.weight;
+ sdom->weight = op->u.d.credit2.weight;
/* Update weights for vcpus, and max_weight for runqueues on which
they reside */
list_for_each ( iter, &sdom->vcpu )
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 4372486..8d1740d 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -1137,18 +1137,19 @@ rt_dom_cntl(
struct list_head *iter;
unsigned long flags;
int rc = 0;
+ xen_domctl_schedparam_vcpu_t local_sched;
+ unsigned int index;
switch ( op->cmd )
{
case XEN_DOMCTL_SCHEDOP_getinfo:
spin_lock_irqsave(&prv->lock, flags);
- svc = list_entry(sdom->vcpu.next, struct rt_vcpu, sdom_elem);
- op->u.rtds.period = svc->period / MICROSECS(1); /* transfer to us */
- op->u.rtds.budget = svc->budget / MICROSECS(1);
+ op->u.d.rtds.period = RTDS_DEFAULT_PERIOD / MICROSECS(1); /* transfer to us */
+ op->u.d.rtds.budget = RTDS_DEFAULT_BUDGET / MICROSECS(1);
spin_unlock_irqrestore(&prv->lock, flags);
break;
case XEN_DOMCTL_SCHEDOP_putinfo:
- if ( op->u.rtds.period == 0 || op->u.rtds.budget == 0 )
+ if ( op->u.d.rtds.period == 0 || op->u.d.rtds.budget == 0 )
{
rc = -EINVAL;
break;
@@ -1157,8 +1158,70 @@ rt_dom_cntl(
list_for_each( iter, &sdom->vcpu )
{
struct rt_vcpu * svc = list_entry(iter, struct rt_vcpu, sdom_elem);
- svc->period = MICROSECS(op->u.rtds.period); /* transfer to nanosec */
- svc->budget = MICROSECS(op->u.rtds.budget);
+ svc->period = MICROSECS(op->u.d.rtds.period); /* transfer to nanosec */
+ svc->budget = MICROSECS(op->u.d.rtds.budget);
+ }
+ spin_unlock_irqrestore(&prv->lock, flags);
+ break;
+ case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
+ spin_lock_irqsave(&prv->lock, flags);
+ for ( index = 0; index < op->u.v.nr_vcpus; index++ )
+ {
+ if ( copy_from_guest_offset(&local_sched,
+ op->u.v.vcpus, index, 1) )
+ {
+ rc = -EFAULT;
+ break;
+ }
+ if ( local_sched.vcpuid >= d->max_vcpus
+ || d->vcpu[local_sched.vcpuid] == NULL )
+ {
+ rc = -EINVAL;
+ break;
+ }
+ svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+
+ local_sched.vcpuid = svc->vcpu->vcpu_id;
+ local_sched.s.rtds.budget = svc->budget / MICROSECS(1);
+ local_sched.s.rtds.period = svc->period / MICROSECS(1);
+ if ( copy_to_guest_offset(op->u.v.vcpus, index,
+ &local_sched, 1) )
+ {
+ rc = -EFAULT;
+ break;
+ }
+ if ( hypercall_preempt_check() )
+ {
+ rc = -ERESTART;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&prv->lock, flags);
+ break;
+ case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
+ spin_lock_irqsave(&prv->lock, flags);
+ for ( index = 0; index < op->u.v.nr_vcpus; index++ )
+ {
+ if ( copy_from_guest_offset(&local_sched,
+ op->u.v.vcpus, index, 1) )
+ {
+ rc = -EFAULT;
+ break;
+ }
+ if ( local_sched.vcpuid >= d->max_vcpus
+ || d->vcpu[local_sched.vcpuid] == NULL )
+ {
+ rc = -EINVAL;
+ break;
+ }
+ svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+ svc->period = MICROSECS(local_sched.s.rtds.period);
+ svc->budget = MICROSECS(local_sched.s.rtds.budget);
+ if ( hypercall_preempt_check() )
+ {
+ rc = -ERESTART;
+ break;
+ }
}
spin_unlock_irqrestore(&prv->lock, flags);
break;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index ecf1545..159425e 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -65,7 +65,6 @@ DEFINE_PER_CPU(struct schedule_data, schedule_data);
DEFINE_PER_CPU(struct scheduler *, scheduler);
static const struct scheduler *schedulers[] = {
- &sched_sedf_def,
&sched_credit_def,
&sched_credit2_def,
&sched_arinc653_def,
@@ -1054,7 +1053,9 @@ long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
if ( (op->sched_id != DOM2OP(d)->sched_id) ||
((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
- (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
+ (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo) &&
+ (op->cmd != XEN_DOMCTL_SCHEDOP_putvcpuinfo) &&
+ (op->cmd != XEN_DOMCTL_SCHEDOP_getvcpuinfo)) )
return -EINVAL;
/* NB: the pluggable scheduler code needs to take care
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index bc45ea5..67a5626 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -330,31 +330,59 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
#define XEN_SCHEDULER_ARINC653 7
#define XEN_SCHEDULER_RTDS 8
+typedef struct xen_domctl_sched_sedf {
+ uint64_aligned_t period;
+ uint64_aligned_t slice;
+ uint64_aligned_t latency;
+ uint32_t extratime;
+ uint32_t weight;
+} xen_domctl_sched_sedf_t;
+
+typedef struct xen_domctl_sched_credit {
+ uint16_t weight;
+ uint16_t cap;
+} xen_domctl_sched_credit_t;
+
+typedef struct xen_domctl_sched_credit2 {
+ uint16_t weight;
+} xen_domctl_sched_credit2_t;
+
+typedef struct xen_domctl_sched_rtds {
+ uint32_t period;
+ uint32_t budget;
+} xen_domctl_sched_rtds_t;
+
+typedef union xen_domctl_schedparam {
+ xen_domctl_sched_sedf_t sedf;
+ xen_domctl_sched_credit_t credit;
+ xen_domctl_sched_credit2_t credit2;
+ xen_domctl_sched_rtds_t rtds;
+} xen_domctl_schedparam_t;
+
+typedef struct xen_domctl_schedparam_vcpu {
+ union {
+ xen_domctl_sched_credit_t credit;
+ xen_domctl_sched_credit2_t credit2;
+ xen_domctl_sched_rtds_t rtds;
+ } s;
+ uint16_t vcpuid;
+} xen_domctl_schedparam_vcpu_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_schedparam_vcpu_t);
+
/* Set or get info? */
#define XEN_DOMCTL_SCHEDOP_putinfo 0
#define XEN_DOMCTL_SCHEDOP_getinfo 1
+#define XEN_DOMCTL_SCHEDOP_putvcpuinfo 2
+#define XEN_DOMCTL_SCHEDOP_getvcpuinfo 3
struct xen_domctl_scheduler_op {
uint32_t sched_id; /* XEN_SCHEDULER_* */
uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
union {
- struct xen_domctl_sched_sedf {
- uint64_aligned_t period;
- uint64_aligned_t slice;
- uint64_aligned_t latency;
- uint32_t extratime;
- uint32_t weight;
- } sedf;
- struct xen_domctl_sched_credit {
- uint16_t weight;
- uint16_t cap;
- } credit;
- struct xen_domctl_sched_credit2 {
- uint16_t weight;
- } credit2;
- struct xen_domctl_sched_rtds {
- uint32_t period;
- uint32_t budget;
- } rtds;
+ xen_domctl_schedparam_t d;
+ struct {
+ XEN_GUEST_HANDLE_64(xen_domctl_schedparam_vcpu_t) vcpus;
+ uint16_t nr_vcpus;
+ } v;
} u;
};
typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
--
1.9.1