diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index e79850b..8e7b723 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -261,19 +261,13 @@ int cpupool_move_domain(struct domain *d, struct cpupool *c)
 static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
 {
     int ret;
-    struct cpupool *old;
     struct domain *d;
 
     if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
         return -EBUSY;
-    old = per_cpu(cpupool, cpu);
-    per_cpu(cpupool, cpu) = c;
     ret = schedule_cpu_switch(cpu, c);
     if ( ret )
-    {
-        per_cpu(cpupool, cpu) = old;
         return ret;
-    }
 
     cpumask_clear_cpu(cpu, &cpupool_free_cpus);
     if (cpupool_moving_cpu == cpu)
@@ -326,7 +320,6 @@ static long cpupool_unassign_cpu_helper(void *info)
         cpumask_clear_cpu(cpu, &cpupool_free_cpus);
         goto out;
     }
-    per_cpu(cpupool, cpu) = NULL;
     cpupool_moving_cpu = -1;
     cpupool_put(cpupool_cpu_moving);
     cpupool_cpu_moving = NULL;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index d7e2d98..9072540 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1486,6 +1486,17 @@ void __init scheduler_init(void)
         BUG();
 }
 
+/*
+ * Move a pCPU outside of the influence of the scheduler of its current
+ * cpupool, or subject it to the scheduler of a new cpupool.
+ *
+ * For the pCPUs that are removed from their cpupool, their scheduler becomes
+ * &ops (the default scheduler, selected at boot, which also services the
+ * default cpupool). However, as these pCPUs are not really part of any pool,
+ * there won't be any scheduling event on them, not even from the default
+ * scheduler. Basically, they will just sit idle until they are explicitly
+ * added back to a cpupool.
+ */
 int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     unsigned long flags;
@@ -1494,9 +1505,24 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
+    struct cpupool *old_pool = per_cpu(cpupool, cpu);
+
+    /*
+     * pCPUs only move from a valid cpupool to free (i.e., out of any pool),
+     * or from free to a valid cpupool. In the former case (which happens when
+     * c is NULL), we want the CPU to have been marked as free already, as
+     * well as to not be valid for the source pool any longer, when we get to
+     * here. In the latter case (which happens when c is a valid cpupool), we
+     * want the CPU to still be marked as free, as well as to not yet be valid
+     * for the destination pool.
+     */
+    ASSERT(c != old_pool && (c != NULL || old_pool != NULL));
+    ASSERT(cpumask_test_cpu(cpu, &cpupool_free_cpus));
+    ASSERT((c == NULL && !cpumask_test_cpu(cpu, old_pool->cpu_valid)) ||
+           (c != NULL && !cpumask_test_cpu(cpu, c->cpu_valid)));
 
     if ( old_ops == new_ops )
-        return 0;
+        goto out;
 
     idle = idle_vcpu[cpu];
     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
@@ -1524,6 +1550,9 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     SCHED_OP(old_ops, free_vdata, vpriv_old);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
 
+ out:
+    per_cpu(cpupool, cpu) = c;
+
     return 0;
 }
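
Note: the net effect of this patch is that per_cpu(cpupool, cpu) is now written in exactly one place, at the very end of schedule_cpu_switch(), instead of being set (and rolled back on failure) by its callers in cpupool.c. The new ASSERTs pin down the only two legal transitions: from a valid pool to free (c == NULL) and from free to a valid pool. What follows is a minimal standalone sketch of those invariants; it is not Xen code, and the plain bitmasks, the per_cpu_cpupool array, and toy_schedule_cpu_switch() are simplified stand-ins for Xen's cpumask_var_t, per_cpu() machinery, and the real function.

/* Standalone model of the invariants the new ASSERTs encode; not Xen code. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 4

struct cpupool { unsigned long cpu_valid; };      /* stand-in for cpumask_var_t  */

static unsigned long cpupool_free_cpus;           /* stand-in for the free mask  */
static struct cpupool *per_cpu_cpupool[NR_CPUS];  /* stand-in for per_cpu(cpupool, cpu) */

static bool test_cpu(unsigned int cpu, unsigned long mask)
{
    return (mask >> cpu) & 1UL;
}

/* Mirrors the flow of the patched schedule_cpu_switch(): validate the
 * transition up front, update the per-CPU pool pointer last. */
static int toy_schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
{
    struct cpupool *old_pool = per_cpu_cpupool[cpu];

    /* Only free -> pool and pool -> free transitions are allowed. */
    assert(c != old_pool && (c != NULL || old_pool != NULL));
    /* The CPU must be marked free in both directions... */
    assert(test_cpu(cpu, cpupool_free_cpus));
    /* ...and must not (or no longer) be valid in the pool involved. */
    assert((c == NULL && !test_cpu(cpu, old_pool->cpu_valid)) ||
           (c != NULL && !test_cpu(cpu, c->cpu_valid)));

    /* (the scheduler pdata/vdata swap would happen here) */

    per_cpu_cpupool[cpu] = c;   /* single point of update, as in the patch */
    return 0;
}

int main(void)
{
    struct cpupool pool = { .cpu_valid = 0 };

    cpupool_free_cpus = 1UL << 0;            /* CPU 0 starts out free       */
    toy_schedule_cpu_switch(0, &pool);       /* free -> pool: passes        */
    pool.cpu_valid |= 1UL << 0;              /* caller then marks it valid  */
    cpupool_free_cpus &= ~(1UL << 0);        /* ...and clears it from free  */

    /* Going back, the caller marks the CPU free and clears its validity
     * in the pool *before* switching, matching the ASSERTed order. */
    cpupool_free_cpus |= 1UL << 0;
    pool.cpu_valid &= ~(1UL << 0);
    toy_schedule_cpu_switch(0, NULL);        /* pool -> free: passes        */

    printf("per-CPU pool pointer for CPU 0: %p\n", (void *)per_cpu_cpupool[0]);
    return 0;
}

As in the patch, updating the pointer at the "out" path means even the early-exit case (old_ops == new_ops) leaves per_cpu(cpupool, cpu) consistent with the pool the CPU is actually in, which is exactly what the removed rollback code in cpupool_assign_cpu_locked() used to approximate by hand.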