diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c index 9bc638c09c..6e886bdfbb 100644 --- a/xen/common/sched_credit.c +++ b/xen/common/sched_credit.c @@ -867,6 +867,17 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit) return cpu; } +static void +csched_vcpu_migrate(const struct scheduler *ops, struct vcpu *vc, + unsigned int new_cpu) +{ + BUG_ON(vc->is_running); + BUG_ON(test_bit(_VPF_migrating, &vc->pause_flags)); + BUG_ON(CSCHED_VCPU(vc) == CSCHED_VCPU(curr_on_cpu(vc->processor))); + BUG_ON(__vcpu_on_runq(CSCHED_VCPU(vc))); + vc->processor = new_cpu; +} + static int csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc) { @@ -1086,6 +1097,18 @@ csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) BUG_ON( sdom == NULL ); } +static int +csched_vcpu_onrunq(const struct scheduler *ops, struct vcpu *vc) +{ + return __vcpu_on_runq(CSCHED_VCPU(vc)); +} + +static int +csched_vcpu_csflags(const struct scheduler *ops, struct vcpu *vc) +{ + return CSCHED_VCPU(vc)->flags; +} + static void csched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) { @@ -2278,8 +2301,12 @@ static const struct scheduler sched_credit_def = { .adjust_global = csched_sys_cntl, .pick_cpu = csched_cpu_pick, + .migrate = csched_vcpu_migrate, .do_schedule = csched_schedule, + .onrunq = csched_vcpu_onrunq, + .csflags = csched_vcpu_csflags, + .dump_cpu_state = csched_dump_pcpu, .dump_settings = csched_dump, .init = csched_init, diff --git a/xen/common/schedule.c b/xen/common/schedule.c index 343ab6306e..2b98b38e6b 100644 --- a/xen/common/schedule.c +++ b/xen/common/schedule.c @@ -1554,7 +1554,34 @@ void context_saved(struct vcpu *prev) SCHED_OP(vcpu_scheduler(prev), context_saved, prev); if ( unlikely(prev->pause_flags & VPF_migrating) ) + { + /* + * If someone (e.g., vcpu_set_affinity()) has set VPF_migrating + * on prev in between when schedule() releases the scheduler + * lock and here, we need to make sure we properly mark the + 
* vcpu as not runnable (and all that comes with it), with + * vcpu_sleep_nosync(), before calling vcpu_migrate(). + */ + /* was: vcpu_sleep_nosync(prev); -- expanded inline below with diagnostics */ + unsigned long flags; + spinlock_t *lock = vcpu_schedule_lock_irqsave(prev, &flags); + + if (vcpu_runnable(prev) || !test_bit(_VPF_migrating, &prev->pause_flags)) + printk("CPU %u: d%uv%d isr=%u runnbl=%d proc=%d pf=%lu orq=%d csf=%u\n", + smp_processor_id(), prev->domain->domain_id, prev->vcpu_id, + prev->is_running, vcpu_runnable(prev), + prev->processor, prev->pause_flags, + SCHED_OP(vcpu_scheduler(prev), onrunq, prev), + SCHED_OP(vcpu_scheduler(prev), csflags, prev)); + if ( prev->runstate.state == RUNSTATE_runnable ) + vcpu_runstate_change(prev, RUNSTATE_offline, NOW()); + BUG_ON(curr_on_cpu(prev->processor) == prev); + SCHED_OP(vcpu_scheduler(prev), sleep, prev); + + vcpu_schedule_unlock_irqrestore(lock, flags, prev); + vcpu_migrate(prev); + } } /* The scheduler timer: force a run through the scheduler */ diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h index 9596eae1e2..97b6461106 100644 --- a/xen/include/xen/sched-if.h +++ b/xen/include/xen/sched-if.h @@ -160,6 +160,9 @@ struct scheduler { void (*insert_vcpu) (const struct scheduler *, struct vcpu *); void (*remove_vcpu) (const struct scheduler *, struct vcpu *); + int (*onrunq) (const struct scheduler *, struct vcpu *); + int (*csflags) (const struct scheduler *, struct vcpu *); + void (*sleep) (const struct scheduler *, struct vcpu *); void (*wake) (const struct scheduler *, struct vcpu *); void (*yield) (const struct scheduler *, struct vcpu *);