[Xen-changelog] [xen stable-4.2] x86/S3: Restore broken vcpu affinity on resume
commit 77cb78851429a5a8509e3dfed466b2580ad5c60d
Author:     Ben Guthro <benjamin.guthro@xxxxxxxxxx>
AuthorDate: Tue Apr 9 16:05:52 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Apr 9 16:05:52 2013 +0200

    x86/S3: Restore broken vcpu affinity on resume

    When in SYS_STATE_suspend, and going through the cpu_disable_scheduler
    path, save a copy of the current cpu affinity, and mark a flag to
    restore it later.

    Later, in the resume process, when enabling nonboot cpus restore these
    affinities.

    Signed-off-by: Ben Guthro <benjamin.guthro@xxxxxxxxxx>
    Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
    master commit: 41e71c2607e036f1ac00df898b8f4acb2d4df7ee
    master date: 2013-04-02 09:52:32 +0200
---
 xen/arch/x86/acpi/power.c |    3 +++
 xen/common/domain.c       |    2 ++
 xen/common/schedule.c     |   45 ++++++++++++++++++++++++++++++++++++++++++---
 xen/include/xen/sched.h   |    6 ++++++
 4 files changed, 53 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index 9e1f989..72adc2f 100644
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -96,7 +96,10 @@ static void thaw_domains(void)
 
     rcu_read_lock(&domlist_read_lock);
     for_each_domain ( d )
+    {
+        restore_vcpu_affinity(d);
         domain_unpause(d);
+    }
     rcu_read_unlock(&domlist_read_lock);
 }
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index e728819..c09fb73 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -126,6 +126,7 @@ struct vcpu *alloc_vcpu(
 
     if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
          !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
+         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
          !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
         goto fail_free;
 
@@ -155,6 +156,7 @@ struct vcpu *alloc_vcpu(
  fail_free:
     free_cpumask_var(v->cpu_affinity);
     free_cpumask_var(v->cpu_affinity_tmp);
+    free_cpumask_var(v->cpu_affinity_saved);
     free_cpumask_var(v->vcpu_dirty_cpumask);
     free_vcpu_struct(v);
     return NULL;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index c2cd9d5..7f298d8 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -538,6 +538,38 @@ void vcpu_force_reschedule(struct vcpu *v)
     }
 }
 
+void restore_vcpu_affinity(struct domain *d)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+    {
+        vcpu_schedule_lock_irq(v);
+
+        if ( v->affinity_broken )
+        {
+            printk(XENLOG_DEBUG "Restoring affinity for d%dv%d\n",
+                   d->domain_id, v->vcpu_id);
+            cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
+            v->affinity_broken = 0;
+        }
+
+        if ( v->processor == smp_processor_id() )
+        {
+            set_bit(_VPF_migrating, &v->pause_flags);
+            vcpu_schedule_unlock_irq(v);
+            vcpu_sleep_nosync(v);
+            vcpu_migrate(v);
+        }
+        else
+        {
+            vcpu_schedule_unlock_irq(v);
+        }
+    }
+
+    domain_update_node_affinity(d);
+}
+
 /*
  * This function is used by cpu_hotplug code from stop_machine context
  * and from cpupools to switch schedulers on a cpu.
@@ -551,7 +583,7 @@ int cpu_disable_scheduler(unsigned int cpu)
     int    ret = 0;
 
     c = per_cpu(cpupool, cpu);
-    if ( (c == NULL) || (system_state == SYS_STATE_suspend) )
+    if ( c == NULL )
         return ret;
 
     for_each_domain_in_cpupool ( d, c )
@@ -564,8 +596,15 @@ int cpu_disable_scheduler(unsigned int cpu)
             if ( cpumask_empty(&online_affinity) &&
                  cpumask_test_cpu(cpu, v->cpu_affinity) )
             {
-                printk("Breaking vcpu affinity for domain %d vcpu %d\n",
-                       v->domain->domain_id, v->vcpu_id);
+                printk(XENLOG_DEBUG "Breaking affinity for d%dv%d\n",
+                       d->domain_id, v->vcpu_id);
+
+                if (system_state == SYS_STATE_suspend)
+                {
+                    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
+                    v->affinity_broken = 1;
+                }
+
                 cpumask_setall(v->cpu_affinity);
             }
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 53804c8..b619269 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -140,6 +140,9 @@ struct vcpu
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
+    /* VCPU need affinity restored */
+    bool_t           affinity_broken;
+
 
     /*
      * > 0: a single port is being polled;
@@ -162,6 +165,8 @@ struct vcpu
     cpumask_var_t    cpu_affinity;
     /* Used to change affinity temporarily. */
     cpumask_var_t    cpu_affinity_tmp;
+    /* Used to restore affinity across S3. */
+    cpumask_var_t    cpu_affinity_saved;
 
     /* Bitmask of CPUs which are holding onto this VCPU's state. */
     cpumask_var_t    vcpu_dirty_cpumask;
@@ -666,6 +671,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int cpu_disable_scheduler(unsigned int cpu);
 int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity);
+void restore_vcpu_affinity(struct domain *d);
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.2

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
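
For readers who want the idea of the patch in isolation, below is a minimal, self-contained C sketch of the save/restore pattern it implements. This is not Xen code: the names (toy_vcpu, toy_break_affinity_for_suspend, toy_restore_affinity) and the uint64_t stand-in for cpumask_var_t are purely illustrative assumptions; the real logic lives in cpu_disable_scheduler() and restore_vcpu_affinity() in the diff above.

/*
 * Illustrative sketch only -- not Xen code.  Mirrors the pattern the
 * patch adds: on suspend, copy the vcpu's affinity aside and mark it
 * "broken" before widening the mask; on resume, copy it back and
 * clear the flag.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t toy_cpumask_t;                 /* toy stand-in for cpumask_var_t */
#define TOY_CPUMASK_ALL ((toy_cpumask_t)~0ULL)  /* "any CPU", like cpumask_setall() */

struct toy_vcpu {
    toy_cpumask_t cpu_affinity;        /* current affinity mask */
    toy_cpumask_t cpu_affinity_saved;  /* copy taken on the suspend path */
    int           affinity_broken;     /* set when the mask was widened */
};

/* Suspend path (cf. cpu_disable_scheduler() under SYS_STATE_suspend). */
static void toy_break_affinity_for_suspend(struct toy_vcpu *v)
{
    v->cpu_affinity_saved = v->cpu_affinity;
    v->affinity_broken = 1;
    v->cpu_affinity = TOY_CPUMASK_ALL;
}

/* Resume path (cf. restore_vcpu_affinity() called from thaw_domains()). */
static void toy_restore_affinity(struct toy_vcpu *v)
{
    if (v->affinity_broken) {
        v->cpu_affinity = v->cpu_affinity_saved;
        v->affinity_broken = 0;
    }
}

int main(void)
{
    struct toy_vcpu v = { .cpu_affinity = 0x4 };   /* pinned to CPU 2 */

    toy_break_affinity_for_suspend(&v);
    printf("suspended: affinity=%#llx broken=%d\n",
           (unsigned long long)v.cpu_affinity, v.affinity_broken);

    toy_restore_affinity(&v);
    printf("resumed:   affinity=%#llx broken=%d\n",
           (unsigned long long)v.cpu_affinity, v.affinity_broken);
    return 0;
}

The key change visible in the diff is that cpu_disable_scheduler() no longer returns early during suspend; it widens each affected vcpu's affinity as before, but first saves it to cpu_affinity_saved and sets affinity_broken so that restore_vcpu_affinity() can undo the widening when thaw_domains() runs on resume.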