[Xen-devel] [PATCH] xen/sched: fix locking in sched_tick_[suspend|resume]()
sched_tick_suspend() and sched_tick_resume() must not call the
scheduler-specific timer handlers while the cpu they are running on is
being moved to or from a cpupool, as the scheduler of that cpu may be
changed concurrently.

Use a new per-cpu lock for that purpose.
Reported-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
To be applied on top of my core scheduling series.
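
As a side note for readers less familiar with the pattern: below is a
minimal userspace analogue of the fix, illustrative only. The names
(cpu_lock, cur_sched, ticker, switch_sched) are invented for this
sketch and are not Xen APIs, and pthread mutexes stand in for Xen's
spinlocks. One thread plays the tick path, the other the cpupool move
path; the per-cpu lock keeps a tick from calling into a scheduler that
is concurrently being switched out.

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 2

struct sched_ops {
    void (*tick)(unsigned int cpu);
};

static unsigned long ticks;

static void credit_tick(unsigned int cpu)
{
    ticks++;                         /* stand-in for real tick work */
}

static struct sched_ops credit = { credit_tick };

/* Like DEFINE_PER_CPU(spinlock_t, sched_cpu_lock) in the patch. */
static pthread_mutex_t cpu_lock[NR_CPUS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

/* Per-cpu scheduler; NULL while the cpu sits in the free pool. */
static struct sched_ops *cur_sched[NR_CPUS];

/* Tick path: take the per-cpu lock around the scheduler hook,
 * mirroring sched_tick_suspend()/sched_tick_resume(). */
static void *ticker(void *arg)
{
    unsigned int cpu = *(unsigned int *)arg;

    for ( int i = 0; i < 100000; i++ )
    {
        pthread_mutex_lock(&cpu_lock[cpu]);
        if ( cur_sched[cpu] )        /* scheduler can't change here */
            cur_sched[cpu]->tick(cpu);
        pthread_mutex_unlock(&cpu_lock[cpu]);
    }

    return NULL;
}

/* Pool move path: swap the scheduler under the same lock, mirroring
 * schedule_cpu_add()/schedule_cpu_rm(). */
static void switch_sched(unsigned int cpu, struct sched_ops *new_ops)
{
    pthread_mutex_lock(&cpu_lock[cpu]);
    cur_sched[cpu] = new_ops;
    pthread_mutex_unlock(&cpu_lock[cpu]);
}

int main(void)
{
    unsigned int cpu = 0;
    pthread_t t;

    switch_sched(cpu, &credit);      /* "add the cpu to a cpupool" */
    pthread_create(&t, NULL, ticker, &cpu);
    switch_sched(cpu, NULL);         /* "remove it from the pool" */
    pthread_join(t, NULL);

    printf("cpu%u: %lu ticks ran, none against a stale scheduler\n",
           cpu, ticks);

    return 0;
}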
---
xen/common/schedule.c | 27 +++++++++++++++++++++++++++
1 file changed, 27 insertions(+)
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 217fcb09ce..744f8cb5db 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -68,6 +68,9 @@ cpumask_t sched_res_mask;
 /* Common lock for free cpus. */
 static DEFINE_SPINLOCK(sched_free_cpu_lock);
 
+/* Lock for guarding per-scheduler calls against scheduler changes on a cpu. */
+static DEFINE_PER_CPU(spinlock_t, sched_cpu_lock);
+
 /* Various timer handlers. */
 static void s_timer_fn(void *unused);
 static void vcpu_periodic_timer_fn(void *data);
@@ -2472,6 +2475,8 @@ static int cpu_schedule_up(unsigned int cpu)
     if ( sr == NULL )
         return -ENOMEM;
 
+    spin_lock_init(&per_cpu(sched_cpu_lock, cpu));
+
     sr->master_cpu = cpu;
     cpumask_copy(sr->cpus, cpumask_of(cpu));
     set_sched_res(cpu, sr);
@@ -2763,11 +2768,14 @@ int schedule_cpu_add(unsigned int cpu, struct cpupool *c)
     struct scheduler *new_ops = c->sched;
     struct sched_resource *sr;
     spinlock_t *old_lock, *new_lock;
+    spinlock_t *cpu_lock = &per_cpu(sched_cpu_lock, cpu);
     unsigned long flags;
     int ret = 0;
 
     rcu_read_lock(&sched_res_rculock);
 
+    spin_lock(cpu_lock);
+
     sr = get_sched_res(cpu);
 
     ASSERT(cpumask_test_cpu(cpu, &cpupool_free_cpus));
@@ -2879,6 +2887,8 @@ int schedule_cpu_add(unsigned int cpu, struct cpupool *c)
     cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
 
  out:
+    spin_unlock(cpu_lock);
+
     rcu_read_unlock(&sched_res_rculock);
 
     return ret;
@@ -2897,12 +2907,15 @@ int schedule_cpu_rm(unsigned int cpu)
     struct sched_unit *unit;
     struct scheduler *old_ops;
     spinlock_t *old_lock;
+    spinlock_t *cpu_lock = &per_cpu(sched_cpu_lock, cpu);
     unsigned long flags;
     int idx, ret = -ENOMEM;
     unsigned int cpu_iter;
 
     rcu_read_lock(&sched_res_rculock);
 
+    spin_lock(cpu_lock);
+
     sr = get_sched_res(cpu);
     old_ops = sr->scheduler;
@@ -3004,6 +3017,8 @@ int schedule_cpu_rm(unsigned int cpu)
     sr->cpupool = NULL;
 
  out:
+    spin_unlock(cpu_lock);
+
     rcu_read_unlock(&sched_res_rculock);
 
     xfree(sr_new);
@@ -3084,11 +3099,17 @@ void sched_tick_suspend(void)
 {
     struct scheduler *sched;
     unsigned int cpu = smp_processor_id();
+    spinlock_t *lock = &per_cpu(sched_cpu_lock, cpu);
 
     rcu_read_lock(&sched_res_rculock);
 
+    spin_lock(lock);
+
     sched = get_sched_res(cpu)->scheduler;
     sched_do_tick_suspend(sched, cpu);
+
+    spin_unlock(lock);
+
     rcu_idle_enter(cpu);
     rcu_idle_timer_start();
@@ -3099,14 +3120,20 @@ void sched_tick_resume(void)
 {
     struct scheduler *sched;
     unsigned int cpu = smp_processor_id();
+    spinlock_t *lock = &per_cpu(sched_cpu_lock, cpu);
 
     rcu_read_lock(&sched_res_rculock);
 
     rcu_idle_timer_stop();
     rcu_idle_exit(cpu);
+
+    spin_lock(lock);
+
     sched = get_sched_res(cpu)->scheduler;
     sched_do_tick_resume(sched, cpu);
+    spin_unlock(lock);
+
     rcu_read_unlock(&sched_res_rculock);
 }
--
2.16.4
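
One more note on ordering, visible in the last two hunks: the new
per-cpu lock always nests inside the sched_res_rculock read section,
and sched_tick_resume() takes it only after rcu_idle_exit(), so the
scheduler hook never runs while the cpu is still idle from RCU's point
of view. A standalone sketch of that acquire/release order (every
function below is a logging stub, not Xen's real implementation):

#include <stdio.h>

static void step(const char *s)
{
    printf("%s\n", s);
}

int main(void)
{
    step("rcu_read_lock(&sched_res_rculock)"); /* pin sched_resource */
    step("rcu_idle_timer_stop()");
    step("rcu_idle_exit(cpu)");                /* leave RCU idle first */
    step("spin_lock(cpu_lock)");               /* then block pool moves */
    step("sched_do_tick_resume(sched, cpu)");  /* safe scheduler call */
    step("spin_unlock(cpu_lock)");
    step("rcu_read_unlock(&sched_res_rculock)");
    return 0;
}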