[Xen-changelog] [xen-unstable] cpupool: Fix CPU hotplug after recent changes.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1274118721 -3600
# Node ID e4028345ad48c442eb55b7bc08afdf1aede0aa2e
# Parent  2a16128f17d884b87124eb159d4c4a0c34339d4e
cpupool: Fix CPU hotplug after recent changes.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/setup.c       |    4 +-
 xen/common/cpupool.c       |   54 ++++----------------------
 xen/common/sched_credit.c  |   74 ++++-----------------------------------
 xen/common/sched_credit2.c |    4 +-
 xen/common/schedule.c      |   61 +++++++++++++++++++++----------------
 xen/include/xen/sched-if.h |    4 +-
 xen/include/xen/sched.h    |    4 +-
 7 files changed, 59 insertions(+), 146 deletions(-)

diff -r 2a16128f17d8 -r e4028345ad48 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c	Mon May 17 18:51:29 2010 +0100
+++ b/xen/arch/x86/setup.c	Mon May 17 18:52:01 2010 +0100
@@ -1019,14 +1019,14 @@ void __init __start_xen(unsigned long mb
 
     xsm_init(&initrdidx, mbi, initial_images_start);
 
+    timer_init();
+
     init_idle_domain();
 
     trap_init();
 
     rcu_init();
 
-    timer_init();
-
     early_time_init();
 
     arch_init_memory();
diff -r 2a16128f17d8 -r e4028345ad48 xen/common/cpupool.c
--- a/xen/common/cpupool.c	Mon May 17 18:51:29 2010 +0100
+++ b/xen/common/cpupool.c	Mon May 17 18:52:01 2010 +0100
@@ -27,9 +27,6 @@ cpumask_t cpupool_free_cpus;
 
 static struct cpupool *cpupool_list;    /* linked list, sorted by poolid */
 
-static int cpupool0_max_cpus;
-integer_param("pool0_max_cpus", cpupool0_max_cpus);
-
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
 static cpumask_t cpupool_locked_cpus = CPU_MASK_NONE;
@@ -110,7 +107,7 @@ struct cpupool *cpupool_create(int pooli
     }
     *q = c;
     c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
-    if ( schedule_init_global(sched, &(c->sched)) )
+    if ( (c->sched = scheduler_alloc(sched)) == NULL )
     {
         spin_unlock(&cpupool_lock);
         cpupool_destroy(c);
@@ -119,7 +116,7 @@ struct cpupool *cpupool_create(int pooli
     spin_unlock(&cpupool_lock);
 
     printk("Created cpupool %d with scheduler %s (%s)\n", c->cpupool_id,
-           c->sched.name, c->sched.opt_name);
+           c->sched->name, c->sched->opt_name);
 
     return c;
 }
@@ -147,7 +144,7 @@ int cpupool_destroy(struct cpupool *c)
     *q = c->next;
     spin_unlock(&cpupool_lock);
     printk(XENLOG_DEBUG "cpupool_destroy(pool=%d)\n", c->cpupool_id);
-    schedule_deinit_global(&(c->sched));
+    scheduler_free(c->sched);
     free_cpupool_struct(c);
     return 0;
 }
@@ -170,29 +167,6 @@ static int cpupool_assign_cpu_locked(str
     }
     cpu_set(cpu, c->cpu_valid);
     return 0;
-}
-
-/*
- * assign free physical cpus to a cpupool
- * cpus assigned are unused cpus with lowest possible ids
- * returns the number of cpus assigned
- */
-int cpupool_assign_ncpu(struct cpupool *c, int ncpu)
-{
-    int i, n = 0;
-
-    spin_lock(&cpupool_lock);
-    for_each_cpu_mask(i, cpupool_free_cpus)
-    {
-        if ( cpupool_assign_cpu_locked(c, i) == 0 )
-            n++;
-        if ( n == ncpu )
-            break;
-    }
-    spin_unlock(&cpupool_lock);
-    printk(XENLOG_DEBUG "cpupool_assign_ncpu(pool=%d,ncpu=%d) rc %d\n",
-           c->cpupool_id, ncpu, n);
-    return n;
 }
 
 static long cpupool_unassign_cpu_helper(void *info)
@@ -352,8 +326,7 @@ static void cpupool_cpu_add(unsigned int
     spin_lock(&cpupool_lock);
     cpu_clear(cpu, cpupool_locked_cpus);
     cpu_set(cpu, cpupool_free_cpus);
-    if ( cpupool0 != NULL )
-        cpupool_assign_cpu_locked(cpupool0, cpu);
+    cpupool_assign_cpu_locked(cpupool0, cpu);
     spin_unlock(&cpupool_lock);
 }
 
@@ -426,7 +399,7 @@ int cpupool_do_sysctl(struct xen_sysctl_
         if ( c == NULL )
             break;
         op->cpupool_id = c->cpupool_id;
-        op->sched_id = c->sched.sched_id;
+        op->sched_id = c->sched->sched_id;
        op->n_dom = c->n_dom;
        ret = cpumask_to_xenctl_cpumap(&(op->cpumap), &(c->cpu_valid));
     }
@@ -599,26 +572,13 @@ static int __init cpupool_presmp_init(vo
 static int __init cpupool_presmp_init(void)
 {
     void *cpu = (void *)(long)smp_processor_id();
+    cpupool0 = cpupool_create(0, NULL);
+    BUG_ON(cpupool0 == NULL);
     cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
     register_cpu_notifier(&cpu_nfb);
     return 0;
 }
 presmp_initcall(cpupool_presmp_init);
-
-static int __init cpupool_init(void)
-{
-    cpupool0 = cpupool_create(0, NULL);
-    BUG_ON(cpupool0 == NULL);
-
-    if ( (cpupool0_max_cpus == 0) || (cpupool0_max_cpus > num_online_cpus()) )
-        cpupool0_max_cpus = num_online_cpus();
-
-    if ( !cpupool_assign_ncpu(cpupool0, cpupool0_max_cpus) )
-        BUG();
-
-    return 0;
-}
-__initcall(cpupool_init);
 
 /*
  * Local variables:
diff -r 2a16128f17d8 -r e4028345ad48 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c	Mon May 17 18:51:29 2010 +0100
+++ b/xen/common/sched_credit.c	Mon May 17 18:52:01 2010 +0100
@@ -169,14 +169,7 @@ struct csched_private {
     uint32_t credit;
     int credit_balance;
     uint32_t runq_sort;
-    int ticker_active;
 };
-
-
-/*
- * Global variables
- */
-static struct csched_private *csched_priv0 = NULL;
 
 static void csched_tick(void *_cpu);
 static void csched_acct(void *dummy);
@@ -351,17 +344,16 @@ csched_alloc_pdata(const struct schedule
     prv->credit += CSCHED_CREDITS_PER_ACCT;
     prv->ncpus++;
     cpu_set(cpu, prv->cpus);
-    if ( (prv->ncpus == 1) && (prv != csched_priv0) )
+    if ( prv->ncpus == 1 )
     {
         prv->master = cpu;
-        init_timer( &prv->master_ticker, csched_acct, prv, cpu);
-        prv->ticker_active = 2;
+        init_timer(&prv->master_ticker, csched_acct, prv, cpu);
+        set_timer(&prv->master_ticker, NOW() +
+                  MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT);
     }
 
     init_timer(&spc->ticker, csched_tick, (void *)(unsigned long)cpu, cpu);
-
-    if ( prv == csched_priv0 )
-        prv->master = first_cpu(prv->cpus);
+    set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
 
     INIT_LIST_HEAD(&spc->runq);
     spc->runq_sort_last = prv->runq_sort;
@@ -1450,58 +1442,22 @@ csched_dump(const struct scheduler *ops)
 }
 
 static int
-csched_init(struct scheduler *ops, int pool0)
+csched_init(struct scheduler *ops)
 {
     struct csched_private *prv;
 
     prv = xmalloc(struct csched_private);
     if ( prv == NULL )
-        return 1;
+        return -ENOMEM;
+    memset(prv, 0, sizeof(*prv));
 
-    if ( pool0 )
-        csched_priv0 = prv;
     ops->sched_data = prv;
     spin_lock_init(&prv->lock);
     INIT_LIST_HEAD(&prv->active_sdom);
-    prv->ncpus = 0;
     prv->master = UINT_MAX;
-    cpus_clear(prv->idlers);
-    prv->weight = 0U;
-    prv->credit = 0U;
-    prv->credit_balance = 0;
-    prv->runq_sort = 0U;
-    prv->ticker_active = (csched_priv0 == prv) ? 0 : 1;
 
     return 0;
 }
-
-/* Tickers cannot be kicked until SMP subsystem is alive. */
-static __init int csched_start_tickers(void)
-{
-    struct csched_pcpu *spc;
-    unsigned int cpu;
-
-    /* Is the credit scheduler initialised? */
-    if ( (csched_priv0 == NULL) || (csched_priv0->ncpus == 0) )
-        return 0;
-
-    csched_priv0->ticker_active = 1;
-
-    for_each_online_cpu ( cpu )
-    {
-        spc = CSCHED_PCPU(cpu);
-        set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
-    }
-
-    init_timer( &csched_priv0->master_ticker, csched_acct, csched_priv0,
-                csched_priv0->master);
-
-    set_timer( &csched_priv0->master_ticker, NOW() +
-               MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT );
-
-    return 0;
-}
-__initcall(csched_start_tickers);
 
 static void
 csched_deinit(const struct scheduler *ops)
@@ -1526,25 +1482,11 @@ static void csched_tick_resume(const str
 {
     struct csched_pcpu *spc;
     uint64_t now = NOW();
-    struct csched_private *prv;
-
-    prv = CSCHED_PRIV(ops);
-    if ( !prv->ticker_active )
-        return;
-
     spc = CSCHED_PCPU(cpu);
 
     set_timer(&spc->ticker, now + MILLISECS(CSCHED_MSECS_PER_TICK)
               - now % MILLISECS(CSCHED_MSECS_PER_TICK) );
-
-    if ( (prv->ticker_active == 2) && (prv->master == cpu) )
-    {
-        set_timer( &prv->master_ticker, now +
-                   MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT
-                   - now % MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT);
-        prv->ticker_active = 1;
-    }
 }
 
 static struct csched_private _csched_priv;
diff -r 2a16128f17d8 -r e4028345ad48 xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c	Mon May 17 18:51:29 2010 +0100
+++ b/xen/common/sched_credit2.c	Mon May 17 18:52:01 2010 +0100
@@ -1134,7 +1134,7 @@ make_runq_map(struct csched_private *prv
 }
 
 static int
-csched_init(struct scheduler *ops, int pool0)
+csched_init(struct scheduler *ops)
 {
     int i;
     struct csched_private *prv;
@@ -1145,7 +1145,7 @@ csched_init(struct scheduler *ops, int p
 
     prv = xmalloc(struct csched_private);
     if ( prv == NULL )
-        return 1;
+        return -ENOMEM;
     memset(prv, 0, sizeof(*prv));
 
     ops->sched_data = prv;
diff -r 2a16128f17d8 -r e4028345ad48 xen/common/schedule.c
--- a/xen/common/schedule.c	Mon May 17 18:51:29 2010 +0100
+++ b/xen/common/schedule.c	Mon May 17 18:52:01 2010 +0100
@@ -72,7 +72,7 @@ static struct scheduler __read_mostly op
         (( (opsptr)->fn != NULL ) ? (opsptr)->fn(opsptr, ##__VA_ARGS__ ) \
          : (typeof((opsptr)->fn(opsptr, ##__VA_ARGS__)))0 )
 
-#define DOM2OP(_d)    (((_d)->cpupool == NULL) ? &ops : &((_d)->cpupool->sched))
+#define DOM2OP(_d)    (((_d)->cpupool == NULL) ? &ops : ((_d)->cpupool->sched))
 #define VCPU2OP(_v)   (DOM2OP((_v)->domain))
 #define VCPU2ONLINE(_v) \
          (((_v)->domain->cpupool == NULL) ? &cpu_online_map \
@@ -243,21 +243,21 @@ int sched_move_domain(struct domain *d,
     void **vcpu_priv;
     void *domdata;
 
-    domdata = SCHED_OP(&(c->sched), alloc_domdata, d);
+    domdata = SCHED_OP(c->sched, alloc_domdata, d);
     if ( domdata == NULL )
         return -ENOMEM;
 
     vcpu_priv = xmalloc_array(void *, d->max_vcpus);
     if ( vcpu_priv == NULL )
     {
-        SCHED_OP(&(c->sched), free_domdata, domdata);
+        SCHED_OP(c->sched, free_domdata, domdata);
        return -ENOMEM;
     }
 
     memset(vcpu_priv, 0, d->max_vcpus * sizeof(void *));
     for_each_vcpu ( d, v )
     {
-        vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v, domdata);
+        vcpu_priv[v->vcpu_id] = SCHED_OP(c->sched, alloc_vdata, v, domdata);
         if ( vcpu_priv[v->vcpu_id] == NULL )
         {
             for_each_vcpu ( d, v )
@@ -266,7 +266,7 @@ int sched_move_domain(struct domain *d,
                 xfree(vcpu_priv[v->vcpu_id]);
             }
             xfree(vcpu_priv);
-            SCHED_OP(&(c->sched), free_domdata, domdata);
+            SCHED_OP(c->sched, free_domdata, domdata);
             return -ENOMEM;
         }
     }
@@ -1133,7 +1133,7 @@ void __init scheduler_init(void)
         if ( strcmp(ops.opt_name, opt_sched) == 0 )
             break;
     }
-    
+
     if ( schedulers[i] == NULL )
     {
         printk("Could not find scheduler: %s\n", opt_sched);
@@ -1144,23 +1144,21 @@ void __init scheduler_init(void)
     register_cpu_notifier(&cpu_nfb);
 
     printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
-    if ( SCHED_OP(&ops, init, 1) )
+    if ( SCHED_OP(&ops, init) )
         panic("scheduler returned error on init\n");
 }
 
-/* switch scheduler on cpu */
 void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     unsigned long flags;
     struct vcpu *v;
-    void *vpriv = NULL;
-    void *ppriv;
-    void *ppriv_old;
-    struct scheduler *old_ops;
-    struct scheduler *new_ops;
-
-    old_ops = per_cpu(scheduler, cpu);
-    new_ops = (c == NULL) ? &ops : &(c->sched);
+    void *ppriv, *ppriv_old, *vpriv = NULL;
+    struct scheduler *old_ops = per_cpu(scheduler, cpu);
+    struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
+
+    if ( old_ops == new_ops )
+        return;
+
     v = per_cpu(schedule_data, cpu).idle;
     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
     if ( c != NULL )
@@ -1192,11 +1190,14 @@ void schedule_cpu_switch(unsigned int cp
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
 }
 
-/* init scheduler global data */
-int schedule_init_global(char *name, struct scheduler *sched)
+struct scheduler *scheduler_alloc(char *name)
 {
     int i;
     const struct scheduler *data;
+    struct scheduler *sched;
+
+    if ( name == NULL )
+        return &ops;
 
     data = &ops;
     for ( i = 0; (schedulers[i] != NULL) && (name != NULL) ; i++ )
@@ -1207,14 +1208,24 @@ int schedule_init_global(char *name, str
             break;
         }
     }
+
+    if ( (sched = xmalloc(struct scheduler)) == NULL )
+        return NULL;
     memcpy(sched, data, sizeof(*sched));
-    return SCHED_OP(sched, init, 0);
-}
-
-/* deinitialize scheduler global data */
-void schedule_deinit_global(struct scheduler *sched)
-{
+    if ( SCHED_OP(sched, init) != 0 )
+    {
+        xfree(sched);
+        sched = NULL;
+    }
+
+    return sched;
+}
+
+void scheduler_free(struct scheduler *sched)
+{
+    BUG_ON(sched == &ops);
     SCHED_OP(sched, deinit);
+    xfree(sched);
 }
 
 void schedule_dump(struct cpupool *c)
@@ -1223,7 +1234,7 @@ void schedule_dump(struct cpupool *c)
     struct scheduler *sched;
     cpumask_t *cpus;
 
-    sched = (c == NULL) ? &ops : &(c->sched);
+    sched = (c == NULL) ? &ops : c->sched;
     cpus = (c == NULL) ? &cpupool_free_cpus : &c->cpu_valid;
     printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
     SCHED_OP(sched, dump_settings);
diff -r 2a16128f17d8 -r e4028345ad48 xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h	Mon May 17 18:51:29 2010 +0100
+++ b/xen/include/xen/sched-if.h	Mon May 17 18:52:01 2010 +0100
@@ -89,7 +89,7 @@ struct scheduler {
     unsigned int sched_id;  /* ID for this scheduler */
     void *sched_data;       /* global data pointer */
 
-    int          (*init)           (struct scheduler *, int);
+    int          (*init)           (struct scheduler *);
     void         (*deinit)         (const struct scheduler *);
 
     void         (*free_vdata)     (const struct scheduler *, void *);
@@ -131,7 +131,7 @@ struct cpupool
     cpumask_t cpu_valid;      /* all cpus assigned to pool */
     struct cpupool *next;
     unsigned int n_dom;
-    struct scheduler sched;
+    struct scheduler *sched;
 };
 
 const struct scheduler *scheduler_get_by_id(unsigned int id);
diff -r 2a16128f17d8 -r e4028345ad48 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h	Mon May 17 18:51:29 2010 +0100
+++ b/xen/include/xen/sched.h	Mon May 17 18:52:01 2010 +0100
@@ -581,8 +581,8 @@ void cpu_init(void);
 
 struct scheduler;
 
-int schedule_init_global(char *name, struct scheduler *sched);
-void schedule_deinit_global(struct scheduler *sched);
+struct scheduler *scheduler_alloc(char *name);
+void scheduler_free(struct scheduler *sched);
 void schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int cpu_disable_scheduler(unsigned int cpu);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
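[Editorial note, not part of the original changeset email.] The central interface change in the patch above is that a cpupool no longer embeds a scheduler instance initialised via schedule_init_global()/schedule_deinit_global(); instead c->sched becomes a pointer obtained from scheduler_alloc() (copy a registered scheduler template, then run its init hook) and released with scheduler_free() (deinit hook, then xfree). The standalone C sketch below mirrors only that allocate-copy-init / deinit-free pattern outside of Xen, under the assumption that this is the part a reader wants to see in isolation; the names demo_init, demo_deinit, sched_template and the sizes used are illustrative stand-ins, not Xen code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Cut-down stand-in for Xen's struct scheduler (see sched-if.h hunk above). */
struct scheduler {
    const char *name;
    const char *opt_name;
    void *sched_data;                    /* per-instance private data        */
    int  (*init)(struct scheduler *);    /* no "pool0" flag after the patch  */
    void (*deinit)(struct scheduler *);
};

static int demo_init(struct scheduler *ops)
{
    ops->sched_data = calloc(1, 64);     /* stands in for csched_private     */
    return ops->sched_data ? 0 : -ENOMEM;
}

static void demo_deinit(struct scheduler *ops)
{
    free(ops->sched_data);
}

/* Template copied for every pool, like the entries of Xen's schedulers[]. */
static const struct scheduler sched_template = {
    .name = "Demo Scheduler", .opt_name = "demo",
    .init = demo_init, .deinit = demo_deinit,
};

/* Mirrors scheduler_alloc(): copy the template, then run its init hook. */
static struct scheduler *scheduler_alloc(void)
{
    struct scheduler *sched = malloc(sizeof(*sched));

    if ( sched == NULL )
        return NULL;
    memcpy(sched, &sched_template, sizeof(*sched));
    if ( sched->init(sched) != 0 )
    {
        free(sched);
        sched = NULL;
    }
    return sched;
}

/* Mirrors scheduler_free(): run the deinit hook, then release the copy. */
static void scheduler_free(struct scheduler *sched)
{
    sched->deinit(sched);
    free(sched);
}

int main(void)
{
    /* A cpupool would keep this pointer in c->sched. */
    struct scheduler *sched = scheduler_alloc();

    if ( sched == NULL )
        return 1;
    printf("created pool scheduler %s (%s)\n", sched->name, sched->opt_name);
    scheduler_free(sched);
    return 0;
}

Allocating a fresh scheduler copy per pool is what lets cpupool_create() fail cleanly and cpupool_destroy() tear the scheduler down, removing the pool0 special-casing (csched_priv0, ticker_active, csched_start_tickers) that the patch deletes.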