[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] credit2: Detect socket layout and assign one runqueue per socket
# HG changeset patch # User Keir Fraser <keir@xxxxxxx> # Date 1293179367 0 # Node ID 597e3fee23bceb489fbab4aea3c74e32c369e2ba # Parent 3e7702cb31dbc72d88d894d443088f55b3119e9d credit2: Detect socket layout and assign one runqueue per socket Because alloc_pdata() is called before the cpu layout information is available, we grab a callback to the newly-created CPU_STARTING notifier. cpu 0 doesn't get a callback, so we simply hard-code it to runqueue 0. Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx> --- xen/common/sched_credit2.c | 69 +++++++++++++++++++++++++++++++++++++++++++-- 1 files changed, 66 insertions(+), 3 deletions(-) diff -r 3e7702cb31db -r 597e3fee23bc xen/common/sched_credit2.c --- a/xen/common/sched_credit2.c Fri Dec 24 08:29:00 2010 +0000 +++ b/xen/common/sched_credit2.c Fri Dec 24 08:29:27 2010 +0000 @@ -24,6 +24,7 @@ #include <asm/atomic.h> #include <xen/errno.h> #include <xen/trace.h> +#include <xen/cpu.h> #if __i386__ #define PRI_stime "lld" @@ -712,13 +713,15 @@ csched_vcpu_insert(const struct schedule printk("%s: Inserting d%dv%d\n", __func__, dom->domain_id, vc->vcpu_id); + /* NB: On boot, idle vcpus are inserted before alloc_pdata() has + * been called for that cpu. + */ if ( ! is_idle_vcpu(vc) ) { /* FIXME: Do we need the private lock here? */ list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu); /* Add vcpu to runqueue of initial processor */ - /* FIXME: Abstract for multiple runqueues */ vcpu_schedule_lock_irq(vc); runq_assign(ops, vc); @@ -1462,6 +1465,20 @@ static void init_pcpu(const struct sched /* Figure out which runqueue to put it in */ rqi = 0; + /* Figure out which runqueue to put it in */ + /* NB: cpu 0 doesn't get a STARTING callback, so we hard-code it to runqueue 0. 
*/ + if ( cpu == 0 ) + rqi = 0; + else + rqi = cpu_to_socket(cpu); + + if ( rqi < 0 ) + { + printk("%s: cpu_to_socket(%d) returned %d!\n", + __func__, cpu, rqi); + BUG(); + } + rqd=prv->rqd + rqi; printk("Adding cpu %d to runqueue %d\n", cpu, rqi); @@ -1495,7 +1512,13 @@ static void * static void * csched_alloc_pdata(const struct scheduler *ops, int cpu) { - init_pcpu(ops, cpu); + /* Check to see if the cpu is online yet */ + /* Note: cpu 0 doesn't get a STARTING callback */ + if ( cpu == 0 || cpu_to_socket(cpu) >= 0 ) + init_pcpu(ops, cpu); + else + printk("%s: cpu %d not online yet, deferring initializatgion\n", + __func__, cpu); return (void *)1; } @@ -1543,6 +1566,41 @@ csched_free_pdata(const struct scheduler } static int +csched_cpu_starting(int cpu) +{ + struct scheduler *ops; + + /* Hope this is safe from cpupools switching things around. :-) */ + ops = per_cpu(scheduler, cpu); + + init_pcpu(ops, cpu); + + return NOTIFY_DONE; +} + +static int cpu_credit2_callback( + struct notifier_block *nfb, unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + int rc = 0; + + switch ( action ) + { + case CPU_STARTING: + csched_cpu_starting(cpu); + break; + default: + break; + } + + return !rc ? NOTIFY_DONE : notifier_from_errno(rc); +} + +static struct notifier_block cpu_credit2_nfb = { + .notifier_call = cpu_credit2_callback +}; + +static int csched_init(struct scheduler *ops) { int i; @@ -1551,15 +1609,20 @@ csched_init(struct scheduler *ops) printk("Initializing Credit2 scheduler\n" \ " WARNING: This is experimental software in development.\n" \ " Use at your own risk.\n"); + + /* Basically no CPU information is available at this point; just + * set up basic structures, and a callback when the CPU info is + * available. 
*/ prv = xmalloc(struct csched_private); if ( prv == NULL ) return -ENOMEM; memset(prv, 0, sizeof(*prv)); ops->sched_data = prv; - spin_lock_init(&prv->lock); INIT_LIST_HEAD(&prv->sdom); + + register_cpu_notifier(&cpu_credit2_nfb); /* But un-initialize all runqueues */ for ( i=0; i<NR_CPUS; i++) _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.