[Xen-changelog] [xen-unstable] Merge?
# HG changeset patch
# User Christian Limpach <Christian.Limpach@xxxxxxxxxxxxx>
# Node ID 8c29bf7ede67044bc8364e7a5c203b90281f6fd6
# Parent  043f8799e68ea90fe607af631f99bdfb2638512a
# Parent  8860eba3dcad1bba929d3703a9ee603c197a402a
Merge?
---
 xen/common/domctl.c        |   43 ++++++++++++--------------------
 xen/common/sched_credit.c  |   43 --------------------------------------
 xen/common/sched_sedf.c    |   15 -------------
 xen/common/schedule.c      |   50 ++++++++++++++++++++++++++++++++++++++++++++-
 xen/include/xen/sched-if.h |    2 -
 xen/include/xen/sched.h    |   17 +++++++++++----
 6 files changed, 76 insertions(+), 94 deletions(-)

diff -r 043f8799e68e -r 8c29bf7ede67 xen/common/domctl.c
--- a/xen/common/domctl.c	Thu Sep 21 19:33:51 2006 +0100
+++ b/xen/common/domctl.c	Thu Sep 21 19:34:45 2006 +0100
@@ -356,37 +356,20 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
         struct vcpu *v;
         cpumask_t new_affinity;
 
+        ret = -ESRCH;
         if ( d == NULL )
-        {
-            ret = -ESRCH;
-            break;
-        }
-
-        if ( (op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS) ||
-             !d->vcpu[op->u.vcpuaffinity.vcpu] )
-        {
-            ret = -EINVAL;
-            put_domain(d);
-            break;
-        }
-
-        v = d->vcpu[op->u.vcpuaffinity.vcpu];
-        if ( v == NULL )
-        {
-            ret = -ESRCH;
-            put_domain(d);
-            break;
-        }
+            break;
+
+        ret = -EINVAL;
+        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
+            goto vcpuaffinity_out;
+
+        ret = -ESRCH;
+        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
+            goto vcpuaffinity_out;
 
         if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
         {
-            if ( v == current )
-            {
-                ret = -EINVAL;
-                put_domain(d);
-                break;
-            }
-
             xenctl_cpumap_to_cpumask(
                 &new_affinity, &op->u.vcpuaffinity.cpumap);
             ret = vcpu_set_affinity(v, &new_affinity);
@@ -395,8 +378,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
         {
             cpumask_to_xenctl_cpumap(
                 &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
-        }
-
+            ret = 0;
+        }
+
+    vcpuaffinity_out:
         put_domain(d);
     }
     break;
diff -r 043f8799e68e -r 8c29bf7ede67 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c	Thu Sep 21 19:33:51 2006 +0100
+++ b/xen/common/sched_credit.c	Thu Sep 21 19:34:45 2006 +0100
@@ -572,47 +572,6 @@ csched_vcpu_wake(struct vcpu *vc)
 }
 
 static int
-csched_vcpu_set_affinity(struct vcpu *vc, cpumask_t *affinity)
-{
-    unsigned long flags;
-    int lcpu;
-
-    if ( vc == current )
-    {
-        /* No locking needed but also can't move on the spot... */
-        if ( !cpu_isset(vc->processor, *affinity) )
-            return -EBUSY;
-
-        vc->cpu_affinity = *affinity;
-    }
-    else
-    {
-        /* Pause, modify, and unpause. */
-        vcpu_pause(vc);
-
-        vc->cpu_affinity = *affinity;
-        if ( !cpu_isset(vc->processor, vc->cpu_affinity) )
-        {
-            /*
-             * We must grab the scheduler lock for the CPU currently owning
-             * this VCPU before changing its ownership.
-             */
-            vcpu_schedule_lock_irqsave(vc, flags);
-            lcpu = vc->processor;
-
-            vc->processor = first_cpu(vc->cpu_affinity);
-
-            spin_unlock_irqrestore(&per_cpu(schedule_data, lcpu).schedule_lock,
-                                   flags);
-        }
-
-        vcpu_unpause(vc);
-    }
-
-    return 0;
-}
-
-static int
 csched_dom_cntl(
     struct domain *d,
     struct xen_domctl_scheduler_op *op)
@@ -1227,8 +1186,6 @@ struct scheduler sched_credit_def = {
     .sleep          = csched_vcpu_sleep,
     .wake           = csched_vcpu_wake,
 
-    .set_affinity   = csched_vcpu_set_affinity,
-
     .adjust         = csched_dom_cntl,
 
     .tick           = csched_tick,
diff -r 043f8799e68e -r 8c29bf7ede67 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c	Thu Sep 21 19:33:51 2006 +0100
+++ b/xen/common/sched_sedf.c	Thu Sep 21 19:34:45 2006 +0100
@@ -1175,20 +1175,6 @@ void sedf_wake(struct vcpu *d)
 }
 
 
-static int sedf_set_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    if ( v == current )
-        return cpu_isset(v->processor, *affinity) ? 0 : -EBUSY;
-
-    vcpu_pause(v);
-    v->cpu_affinity = *affinity;
-    v->processor = first_cpu(v->cpu_affinity);
-    vcpu_unpause(v);
-
-    return 0;
-}
-
-
 /* Print a lot of useful information about a domains in the system */
 static void sedf_dump_domain(struct vcpu *d)
 {
@@ -1449,7 +1435,6 @@ struct scheduler sched_sedf_def = {
     .sleep          = sedf_sleep,
     .wake           = sedf_wake,
     .adjust         = sedf_adjust,
-    .set_affinity   = sedf_set_affinity
 };
 
 /*
diff -r 043f8799e68e -r 8c29bf7ede67 xen/common/schedule.c
--- a/xen/common/schedule.c	Thu Sep 21 19:33:51 2006 +0100
+++ b/xen/common/schedule.c	Thu Sep 21 19:34:45 2006 +0100
@@ -181,15 +181,56 @@ void vcpu_wake(struct vcpu *v)
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 }
 
+static void vcpu_migrate(struct vcpu *v)
+{
+    cpumask_t online_affinity;
+    unsigned long flags;
+    int old_cpu;
+
+    vcpu_schedule_lock_irqsave(v, flags);
+
+    if ( test_bit(_VCPUF_running, &v->vcpu_flags) ||
+         !test_and_clear_bit(_VCPUF_migrating, &v->vcpu_flags) )
+    {
+        vcpu_schedule_unlock_irqrestore(v, flags);
+        return;
+    }
+
+    /* Switch to new CPU, then unlock old CPU. */
+    old_cpu = v->processor;
+    cpus_and(online_affinity, v->cpu_affinity, cpu_online_map);
+    v->processor = first_cpu(online_affinity);
+    spin_unlock_irqrestore(
+        &per_cpu(schedule_data, old_cpu).schedule_lock, flags);
+
+    /* Wake on new CPU. */
+    vcpu_wake(v);
+}
+
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
 {
     cpumask_t online_affinity;
+    unsigned long flags;
 
     cpus_and(online_affinity, *affinity, cpu_online_map);
     if ( cpus_empty(online_affinity) )
         return -EINVAL;
 
-    return SCHED_OP(set_affinity, v, affinity);
+    vcpu_schedule_lock_irqsave(v, flags);
+
+    v->cpu_affinity = *affinity;
+    if ( !cpu_isset(v->processor, v->cpu_affinity) )
+        set_bit(_VCPUF_migrating, &v->vcpu_flags);
+
+    vcpu_schedule_unlock_irqrestore(v, flags);
+
+    if ( test_bit(_VCPUF_migrating, &v->vcpu_flags) )
+    {
+        vcpu_sleep_nosync(v);
+        vcpu_migrate(v);
+    }
+
+    return 0;
 }
 
 /* Block the currently-executing domain until a pertinent event occurs. */
@@ -555,6 +596,13 @@ static void __enter_scheduler(void)
     context_switch(prev, next);
 }
 
+void context_saved(struct vcpu *prev)
+{
+    clear_bit(_VCPUF_running, &prev->vcpu_flags);
+
+    if ( unlikely(test_bit(_VCPUF_migrating, &prev->vcpu_flags)) )
+        vcpu_migrate(prev);
+}
 
 /****************************************************************************
  * Timers: the scheduler utilises a number of timers
diff -r 043f8799e68e -r 8c29bf7ede67 xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h	Thu Sep 21 19:33:51 2006 +0100
+++ b/xen/include/xen/sched-if.h	Thu Sep 21 19:34:45 2006 +0100
@@ -69,8 +69,6 @@ struct scheduler {
     void         (*sleep)          (struct vcpu *);
     void         (*wake)           (struct vcpu *);
 
-    int          (*set_affinity)   (struct vcpu *, cpumask_t *);
-
     struct task_slice (*do_schedule) (s_time_t);
 
     int          (*adjust)         (struct domain *,
diff -r 043f8799e68e -r 8c29bf7ede67 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h	Thu Sep 21 19:33:51 2006 +0100
+++ b/xen/include/xen/sched.h	Thu Sep 21 19:34:45 2006 +0100
@@ -312,7 +312,7 @@ extern void context_switch(
  * saved to memory. Alternatively, if implementing lazy context switching,
  * ensure that invoking sync_vcpu_execstate() will switch and commit @prev.
  */
-#define context_saved(prev) (clear_bit(_VCPUF_running, &(prev)->vcpu_flags))
+extern void context_saved(struct vcpu *prev);
 
 /* Called by the scheduler to continue running the current VCPU. */
 extern void continue_running(
@@ -386,9 +386,12 @@ extern struct domain *domain_list;
 /* VCPU is paused by the hypervisor? */
 #define _VCPUF_paused          11
 #define VCPUF_paused           (1UL<<_VCPUF_paused)
-/* VCPU is blocked awaiting an event to be consumed by Xen. */
+ /* VCPU is blocked awaiting an event to be consumed by Xen. */
 #define _VCPUF_blocked_in_xen  12
 #define VCPUF_blocked_in_xen   (1UL<<_VCPUF_blocked_in_xen)
+ /* VCPU affinity has changed: migrating to a new CPU. */
+#define _VCPUF_migrating       13
+#define VCPUF_migrating        (1UL<<_VCPUF_migrating)
 /*
  * Per-domain flags (domain_flags).
  */
@@ -418,9 +421,15 @@ static inline int vcpu_runnable(struct v
 static inline int vcpu_runnable(struct vcpu *v)
 {
     return ( !(v->vcpu_flags &
-               (VCPUF_blocked|VCPUF_down|VCPUF_paused|VCPUF_blocked_in_xen)) &&
+               ( VCPUF_blocked |
+                 VCPUF_down |
+                 VCPUF_paused |
+                 VCPUF_blocked_in_xen |
+                 VCPUF_migrating )) &&
             !(v->domain->domain_flags &
-              (DOMF_shutdown|DOMF_ctrl_pause|DOMF_paused)) );
+              ( DOMF_shutdown |
+                DOMF_ctrl_pause |
+                DOMF_paused )));
 }
 
 void vcpu_pause(struct vcpu *v);
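
The substance of this merge is the schedule.c change: the per-scheduler set_affinity hook is dropped, and affinity changes are handled generically by marking the vCPU with _VCPUF_migrating, trying to move it at once, and letting context_saved() finish the move if the vCPU was still running. The following is only a minimal, single-threaded sketch of that handshake: the toy_* names are invented for illustration, plain unsigned bitmasks stand in for cpumask_t, and the per-CPU schedule locks are omitted entirely, so it models the control flow of the patch rather than the real Xen code.

    #include <stdio.h>

    #define F_RUNNING   (1u << 0)   /* models _VCPUF_running   */
    #define F_MIGRATING (1u << 1)   /* models _VCPUF_migrating */

    struct toy_vcpu {
        unsigned int flags;
        unsigned int cpu_affinity;   /* bit i set => CPU i allowed */
        int processor;
    };

    static unsigned int cpu_online_map = 0xF;  /* pretend CPUs 0-3 are online */

    static int first_cpu(unsigned int mask)
    {
        for (int i = 0; i < 32; i++)
            if (mask & (1u << i))
                return i;
        return -1;
    }

    /* Like vcpu_migrate(): only move a vCPU that is not running and is
     * actually marked for migration; pick the first online allowed CPU. */
    static void toy_migrate(struct toy_vcpu *v)
    {
        if ((v->flags & F_RUNNING) || !(v->flags & F_MIGRATING))
            return;
        v->flags &= ~F_MIGRATING;
        v->processor = first_cpu(v->cpu_affinity & cpu_online_map);
    }

    /* Like vcpu_set_affinity(): record the new mask, and if the vCPU now
     * sits on a disallowed CPU, flag it and try to migrate right away. */
    static int toy_set_affinity(struct toy_vcpu *v, unsigned int affinity)
    {
        if ((affinity & cpu_online_map) == 0)
            return -1;                 /* -EINVAL in the real code */
        v->cpu_affinity = affinity;
        if (!(affinity & (1u << v->processor)))
            v->flags |= F_MIGRATING;
        toy_migrate(v);                /* no-op while still running */
        return 0;
    }

    /* Like context_saved(): once the old context is saved, finish any
     * migration that was deferred because the vCPU was running. */
    static void toy_context_saved(struct toy_vcpu *v)
    {
        v->flags &= ~F_RUNNING;
        if (v->flags & F_MIGRATING)
            toy_migrate(v);
    }

    int main(void)
    {
        struct toy_vcpu v = { .flags = F_RUNNING,
                              .cpu_affinity = 0xF, .processor = 0 };

        toy_set_affinity(&v, 1u << 2); /* restrict to CPU 2 while running */
        printf("still on CPU %d (migration deferred)\n", v.processor);

        toy_context_saved(&v);         /* scheduler has saved its context */
        printf("now on CPU %d\n", v.processor);
        return 0;
    }

In this model, as in the patch, the flag is the only coordination point: the setter marks the vCPU and the scheduler completes the move once _VCPUF_running is clear, which is why context_saved() had to become a real function instead of a macro.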