diff -r 72470de157ce xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Wed Feb 16 09:49:33 2011 +0000
+++ b/xen/common/sched_credit.c Wed Feb 16 15:09:54 2011 +0100
@@ -1268,7 +1268,8 @@ csched_load_balance(struct csched_privat
         /*
          * Any work over there to steal?
          */
-        speer = csched_runq_steal(peer_cpu, cpu, snext->pri);
+        speer = cpu_isset(peer_cpu, *online) ?
+            csched_runq_steal(peer_cpu, cpu, snext->pri) : NULL;
         pcpu_schedule_unlock(peer_cpu);
         if ( speer != NULL )
         {
diff -r 72470de157ce xen/common/schedule.c
--- a/xen/common/schedule.c Wed Feb 16 09:49:33 2011 +0000
+++ b/xen/common/schedule.c Wed Feb 16 15:09:54 2011 +0100
@@ -395,7 +395,28 @@ static void vcpu_migrate(struct vcpu *v)
     unsigned long flags;
     int old_cpu, new_cpu;
 
-    vcpu_schedule_lock_irqsave(v, flags);
+    for (;;)
+    {
+        vcpu_schedule_lock_irqsave(v, flags);
+
+        /* Select new CPU. */
+        old_cpu = v->processor;
+        new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
+
+        if ( new_cpu == old_cpu )
+            break;
+
+        if ( !pcpu_schedule_trylock(new_cpu) )
+        {
+            vcpu_schedule_unlock_irqrestore(v, flags);
+            continue;
+        }
+        if ( cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
+            break;
+
+        pcpu_schedule_unlock(new_cpu);
+        vcpu_schedule_unlock_irqrestore(v, flags);
+    }
 
     /*
      * NB. Check of v->running happens /after/ setting migration flag
@@ -405,13 +426,12 @@ static void vcpu_migrate(struct vcpu *v)
     if ( v->is_running ||
          !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
     {
+        if ( old_cpu != new_cpu )
+            pcpu_schedule_unlock(new_cpu);
+
         vcpu_schedule_unlock_irqrestore(v, flags);
         return;
     }
-
-    /* Select new CPU. */
-    old_cpu = v->processor;
-    new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
 
     /*
      * Transfer urgency status to new CPU before switching CPUs, as once
@@ -424,9 +444,13 @@ static void vcpu_migrate(struct vcpu *v)
         atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
     }
 
-    /* Switch to new CPU, then unlock old CPU. This is safe because
+    /* Switch to new CPU, then unlock new and old CPU. This is safe because
      * the lock pointer cant' change while the current lock is held. */
     v->processor = new_cpu;
+
+    if ( old_cpu != new_cpu )
+        pcpu_schedule_unlock(new_cpu);
+
     spin_unlock_irqrestore(
         per_cpu(schedule_data, old_cpu).schedule_lock, flags);
 