diff -r 7ada6faef565 xen/common/cpupool.c
--- a/xen/common/cpupool.c	Sun Feb 06 17:26:31 2011 +0000
+++ b/xen/common/cpupool.c	Mon Feb 07 14:26:50 2011 +0100
@@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(cpupool_lock);
 
 DEFINE_PER_CPU(struct cpupool *, cpupool);
 
-#define cpupool_dprintk(x...) ((void)0)
+#define cpupool_dprintk(x...) printk(x)
 
 static struct cpupool *alloc_cpupool_struct(void)
 {
@@ -227,14 +227,30 @@ static int cpupool_assign_cpu_locked(str
     return 0;
 }
 
+static long cpupool_assign_cpu_helper(void *info)
+{
+    int cpu = cpupool_moving_cpu;
+    long ret;
+
+    cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d)\n",
+                    cpupool_cpu_moving->cpupool_id, cpu);
+    BUG_ON(!is_idle_vcpu(current));
+    BUG_ON(cpu != smp_processor_id());
+    spin_lock(&cpupool_lock);
+    ret = cpupool_assign_cpu_locked(cpupool_cpu_moving, cpu);
+    spin_unlock(&cpupool_lock);
+    return ret;
+}
+
 static long cpupool_unassign_cpu_helper(void *info)
 {
     int cpu = cpupool_moving_cpu;
     long ret;
 
-    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d) ret %ld\n",
-                    cpupool_id, cpu, ret);
-
+    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
+                    cpupool_cpu_moving->cpupool_id, cpu);
+    BUG_ON(!is_idle_vcpu(current));
+    BUG_ON(cpu != smp_processor_id());
     spin_lock(&cpupool_lock);
     ret = cpu_disable_scheduler(cpu);
     cpu_set(cpu, cpupool_free_cpus);
@@ -258,9 +274,51 @@ out:
 }
 
 /*
+ * assign a specific cpu to a cpupool
+ * we must be sure to run on the cpu to be assigned in idle! to achieve this
+ * the main functionality is performed via continue_hypercall_on_cpu on the
+ * specific cpu.
+ * possible failures:
+ * - cpu not free
+ * - cpu just being unplugged
+ */
+int cpupool_assign_cpu(struct cpupool *c, unsigned int cpu)
+{
+    int ret;
+
+    cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d)\n",
+                    c->cpupool_id, cpu);
+
+    spin_lock(&cpupool_lock);
+    ret = -EBUSY;
+    if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
+        goto out;
+    if ( cpu_isset(cpu, cpupool_locked_cpus) )
+        goto out;
+
+    ret = 0;
+    if ( !cpu_isset(cpu, cpupool_free_cpus) && (cpu != cpupool_moving_cpu) )
+        goto out;
+
+    cpupool_moving_cpu = cpu;
+    atomic_inc(&c->refcnt);
+    cpupool_cpu_moving = c;
+    cpu_clear(cpu, c->cpu_valid);
+    spin_unlock(&cpupool_lock);
+
+    return continue_hypercall_on_cpu(cpu, cpupool_assign_cpu_helper, c);
+
+out:
+    spin_unlock(&cpupool_lock);
+    cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
+                    c->cpupool_id, cpu, ret);
+    return ret;
+}
+
+/*
  * unassign a specific cpu from a cpupool
- * we must be sure not to run on the cpu to be unassigned! to achieve this
- * the main functionality is performed via continue_hypercall_on_cpu on a
+ * we must be sure to run on the cpu to be unassigned in idle! to achieve this
+ * the main functionality is performed via continue_hypercall_on_cpu on the
  * specific cpu.
  * if the cpu to be removed is the last one of the cpupool no active domain
 * must be bound to the cpupool. dying domains are moved to cpupool0 as they
@@ -271,7 +329,6 @@ out:
  */
 int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
 {
-    int work_cpu;
     int ret;
     struct domain *d;
 
@@ -319,19 +376,12 @@ int cpupool_unassign_cpu(struct cpupool
     cpu_clear(cpu, c->cpu_valid);
     spin_unlock(&cpupool_lock);
 
-    work_cpu = smp_processor_id();
-    if ( work_cpu == cpu )
-    {
-        work_cpu = first_cpu(cpupool0->cpu_valid);
-        if ( work_cpu == cpu )
-            work_cpu = next_cpu(cpu, cpupool0->cpu_valid);
-    }
-    return continue_hypercall_on_cpu(work_cpu, cpupool_unassign_cpu_helper, c);
+    return continue_hypercall_on_cpu(cpu, cpupool_unassign_cpu_helper, c);
 
 out:
     spin_unlock(&cpupool_lock);
     cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
-                    cpupool_id, cpu, ret);
+                    c->cpupool_id, cpu, ret);
     return ret;
 }
 
@@ -345,7 +395,7 @@ int cpupool_add_domain(struct domain *d,
 {
     struct cpupool *c;
     int rc = 1;
-    int n_dom;
+    int n_dom = 0;
 
     if ( poolid == CPUPOOLID_NONE )
         return 0;
@@ -472,27 +522,15 @@ int cpupool_do_sysctl(struct xen_sysctl_
     {
         unsigned cpu;
 
+        c = __cpupool_get_by_id(op->cpupool_id, 0);
+        ret = -ENOENT;
+        if ( c == NULL )
+            break;
         cpu = op->cpu;
-        cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d)\n",
-                        op->cpupool_id, cpu);
-        spin_lock(&cpupool_lock);
         if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
             cpu = first_cpu(cpupool_free_cpus);
-        ret = -EINVAL;
-        if ( cpu >= NR_CPUS )
-            goto addcpu_out;
-        ret = -EBUSY;
-        if ( !cpu_isset(cpu, cpupool_free_cpus) )
-            goto addcpu_out;
-        c = cpupool_find_by_id(op->cpupool_id, 0);
-        ret = -ENOENT;
-        if ( c == NULL )
-            goto addcpu_out;
-        ret = cpupool_assign_cpu_locked(c, cpu);
-    addcpu_out:
-        spin_unlock(&cpupool_lock);
-        cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
-                        op->cpupool_id, cpu, ret);
+        ret = (cpu < NR_CPUS) ? cpupool_assign_cpu(c, cpu) : -EINVAL;
+        cpupool_put(c);
     }
     break;
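
For readers following the control flow: assign and unassign now funnel through
the same two-phase pattern. Phase 1 runs in the sysctl context, validates under
cpupool_lock, marks the cpu as in flight via cpupool_moving_cpu /
cpupool_cpu_moving, and takes a reference on the pool; phase 2 (the *_helper())
is deferred to the target cpu itself with continue_hypercall_on_cpu() and does
the actual switch from the idle vCPU there. A stripped-down sketch of that
shape follows; pool_lock, moving_cpu, moving_pool, move_helper, move_cpu and
perform_move() are placeholder names for illustration, not the literal
cpupool.c code above.

/*
 * Sketch of the two-phase cpu-move pattern used by the patch.
 * Placeholder identifiers stand in for cpupool_lock,
 * cpupool_moving_cpu, cpupool_cpu_moving and the assign/unassign work.
 */
static DEFINE_SPINLOCK(pool_lock);
static int moving_cpu = -1;             /* cpu currently being moved */
static struct cpupool *moving_pool;     /* pool it is moving to/from */

/* Phase 2: runs via continue_hypercall_on_cpu() on the target cpu. */
static long move_helper(void *info)
{
    int cpu = moving_cpu;
    long ret;

    BUG_ON(!is_idle_vcpu(current));     /* continuation runs in idle */
    BUG_ON(cpu != smp_processor_id());  /* ... on the target cpu     */

    spin_lock(&pool_lock);
    ret = perform_move(moving_pool, cpu);   /* assign or unassign */
    spin_unlock(&pool_lock);
    return ret;
}

/* Phase 1: validate and mark the move, then defer to the target cpu. */
static int move_cpu(struct cpupool *c, unsigned int cpu)
{
    spin_lock(&pool_lock);
    /* ... fail with -EBUSY/-EINVAL if the cpu is not movable ... */
    moving_cpu = cpu;
    atomic_inc(&c->refcnt);             /* pin the pool across phase 2 */
    moving_pool = c;
    spin_unlock(&pool_lock);

    return continue_hypercall_on_cpu(cpu, move_helper, c);
}

This is also why the old work_cpu dance in cpupool_unassign_cpu() can go away:
instead of making sure we do NOT run on the cpu being removed, the continuation
now deliberately runs on exactly that cpu (continue_hypercall_on_cpu() hands
the continuation to a tasklet, and tasklets are executed from the idle vCPU),
which is what the two BUG_ON()s in the helpers assert.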