diff -r 297dffc6ca65 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c	Tue Feb 09 14:45:09 2010 +0000
+++ b/xen/arch/ia64/vmx/vmmu.c	Mon Feb 15 13:51:23 2010 +0000
@@ -394,7 +394,7 @@
     if (cpu != current->processor)
         return;
     local_irq_save(flags);
-    if (!spin_trylock(&per_cpu(schedule_data, cpu).schedule_lock))
+    if (!spin_trylock(per_cpu(schedule_data, cpu).schedule_lock))
         goto bail2;
     if (v->processor != cpu)
         goto bail1;
@@ -416,7 +416,7 @@
     ia64_dv_serialize_data();
     args->vcpu = NULL;
 bail1:
-    spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
+    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
 bail2:
     local_irq_restore(flags);
 }
@@ -446,7 +446,7 @@
     do {
         cpu = v->processor;
         if (cpu != current->processor) {
-            spin_barrier(&per_cpu(schedule_data, cpu).schedule_lock);
+            spin_barrier(per_cpu(schedule_data, cpu).schedule_lock);
             /* Flush VHPT on remote processors. */
             smp_call_function_single(cpu, &ptc_ga_remote_func, &args, 1);
         } else {
diff -r 297dffc6ca65 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c	Tue Feb 09 14:45:09 2010 +0000
+++ b/xen/common/sched_credit.c	Mon Feb 15 13:51:23 2010 +0000
@@ -770,7 +770,7 @@
 
     spc->runq_sort_last = sort_epoch;
 
-    spin_lock_irqsave(&per_cpu(schedule_data, cpu).schedule_lock, flags);
+    spin_lock_irqsave(per_cpu(schedule_data, cpu).schedule_lock, flags);
 
     runq = &spc->runq;
     elem = runq->next;
@@ -795,7 +795,7 @@
         elem = next;
     }
 
-    spin_unlock_irqrestore(&per_cpu(schedule_data, cpu).schedule_lock, flags);
+    spin_unlock_irqrestore(per_cpu(schedule_data, cpu).schedule_lock, flags);
 }
 
 static void
@@ -1110,7 +1110,7 @@
          * cause a deadlock if the peer CPU is also load balancing and trying
          * to lock this CPU.
          */
-        if ( !spin_trylock(&per_cpu(schedule_data, peer_cpu).schedule_lock) )
+        if ( !spin_trylock(per_cpu(schedule_data, peer_cpu).schedule_lock) )
         {
             CSCHED_STAT_CRANK(steal_trylock_failed);
             continue;
@@ -1120,7 +1120,7 @@
         * Any work over there to steal?
         */
        speer = csched_runq_steal(peer_cpu, cpu, snext->pri);
-       spin_unlock(&per_cpu(schedule_data, peer_cpu).schedule_lock);
+       spin_unlock(per_cpu(schedule_data, peer_cpu).schedule_lock);
        if ( speer != NULL )
            return speer;
    }
diff -r 297dffc6ca65 xen/common/schedule.c
--- a/xen/common/schedule.c	Tue Feb 09 14:45:09 2010 +0000
+++ b/xen/common/schedule.c	Mon Feb 15 13:51:23 2010 +0000
@@ -108,7 +108,7 @@
     s_time_t delta;
 
     ASSERT(v->runstate.state != new_state);
-    ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
 
     trace_runstate_change(v, new_state);
 
@@ -299,7 +299,7 @@
     old_cpu = v->processor;
     v->processor = SCHED_OP(pick_cpu, v);
     spin_unlock_irqrestore(
-        &per_cpu(schedule_data, old_cpu).schedule_lock, flags);
+        per_cpu(schedule_data, old_cpu).schedule_lock, flags);
 
     /* Wake on new CPU. */
     vcpu_wake(v);
 
@@ -806,7 +806,7 @@
 
     sd = &this_cpu(schedule_data);
 
-    spin_lock_irq(&sd->schedule_lock);
+    spin_lock_irq(sd->schedule_lock);
 
     stop_timer(&sd->s_timer);
 
@@ -822,7 +822,7 @@
 
     if ( unlikely(prev == next) )
     {
-        spin_unlock_irq(&sd->schedule_lock);
+        spin_unlock_irq(sd->schedule_lock);
         trace_continue_running(next);
         return continue_running(prev);
     }
@@ -850,7 +850,7 @@
     ASSERT(!next->is_running);
     next->is_running = 1;
 
-    spin_unlock_irq(&sd->schedule_lock);
+    spin_unlock_irq(sd->schedule_lock);
 
     perfc_incr(sched_ctx);
 
@@ -922,7 +922,9 @@
 
     for_each_possible_cpu ( i )
     {
-        spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
+        spin_lock_init(&per_cpu(schedule_data, i)._lock);
+        per_cpu(schedule_data, i).schedule_lock
+            = &per_cpu(schedule_data, i)._lock;
         init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
     }
 
@@ -956,10 +958,10 @@
 
     for_each_online_cpu ( i )
     {
-        spin_lock(&per_cpu(schedule_data, i).schedule_lock);
+        spin_lock(per_cpu(schedule_data, i).schedule_lock);
         printk("CPU[%02d] ", i);
         SCHED_OP(dump_cpu_state, i);
-        spin_unlock(&per_cpu(schedule_data, i).schedule_lock);
+        spin_unlock(per_cpu(schedule_data, i).schedule_lock);
     }
 
     local_irq_restore(flags);
diff -r 297dffc6ca65 xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h	Tue Feb 09 14:45:09 2010 +0000
+++ b/xen/include/xen/sched-if.h	Mon Feb 15 13:51:23 2010 +0000
@@ -10,8 +10,43 @@
 
 #include <xen/percpu.h>
 
+/* How do we check if the vcpu has migrated since we've grabbed the lock?
+ * Have to add a runqueue ID?  Still have to map vcpu to lock...
+ *
+ * When we need to lock:
+ * + When changing certain values in the vcpu struct
+   - runstate
+     . Including sleep
+   - Pause state... (vcpu_runnable)?
+   - v->processor
+   - v->is_running (Implicitly by grabbing schedule lock in schedule)
+   - v->affinity
+   - Anytime we want to avoid a running vcpu being scheduled out
+     while we're doing something
+     . e.g., sched_adjust
+ + When scheduling
+   - Implicitly also covers is_running, runstate_change
+
+ + For credit2:
+   - Updating runqueue, credits, &c
+ *
+ * Ideas:
+ * + Pointer in the vcpu struct; check to see if it's changed since you grabbed it.
+ *  - Big addition to struct
+ *  - Can a pointer to a lock be protected by the lock it points to?!?
+ * + Lock by runq id, map cpu to runq (?)
+ * + Spinlock callback w/ vcpu pointer.
+    - Turns spinlocks into indirect function calls.
+ + Just do the same thing; it won't hurt to grab the same lock twice; if it does,
+   we can think about making the loop more efficient.
+ */
+
+/* Idea: For better cache locality, keep the actual lock in the same cache area
+ * as the rest of the struct.  Just have the scheduler point to the one it wants
+ * (this may be the one right in front of it). */
 struct schedule_data {
-    spinlock_t          schedule_lock;  /* spinlock protecting curr        */
+    spinlock_t         *schedule_lock,
+                        _lock;
     struct vcpu        *curr;           /* current task                    */
     struct vcpu        *idle;           /* idle task for this cpu          */
     void               *sched_priv;
@@ -26,11 +61,19 @@
 
     for ( ; ; )
     {
+        /* NB: For schedulers with multiple cores per runqueue,
+         * a vcpu may change processor w/o changing runqueues;
+         * so we may release a lock only to grab it again.
+         *
+         * If that is measured to be an issue, then the check
+         * should be changed to checking if the locks pointed to
+         * by cpu and v->processor are still the same.
+         */
         cpu = v->processor;
-        spin_lock(&per_cpu(schedule_data, cpu).schedule_lock);
+        spin_lock(per_cpu(schedule_data, cpu).schedule_lock);
         if ( likely(v->processor == cpu) )
             break;
-        spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
+        spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
     }
 }
 
@@ -41,7 +84,7 @@
 
 static inline void vcpu_schedule_unlock(struct vcpu *v)
 {
-    spin_unlock(&per_cpu(schedule_data, v->processor).schedule_lock);
+    spin_unlock(per_cpu(schedule_data, v->processor).schedule_lock);
 }
 
 #define vcpu_schedule_unlock_irq(v) \
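
As a reading aid, and not part of the patch itself, here is a minimal user-space sketch of the locking pattern the patch introduces: each per-CPU structure keeps a private lock (_lock) plus a pointer (schedule_lock) that initially points at it, every locker dereferences the pointer, and the vcpu-lock helper re-checks v->processor after acquisition because the vcpu may have migrated in between. The toy_* names, NR_TOY_CPUS, and the use of pthread spinlocks are illustrative assumptions only; the real types and helpers are those changed in sched-if.h and schedule.c above.

#include <pthread.h>

#define NR_TOY_CPUS 4

struct toy_sched_data {
    pthread_spinlock_t *schedule_lock;  /* lock actually taken by lockers */
    pthread_spinlock_t  _lock;          /* default per-CPU lock, kept cache-local */
};

struct toy_vcpu {
    volatile int processor;             /* may change while we are locking */
};

static struct toy_sched_data toy_sched[NR_TOY_CPUS];

static void toy_sched_init(void)
{
    int i;

    for ( i = 0; i < NR_TOY_CPUS; i++ )
    {
        pthread_spin_init(&toy_sched[i]._lock, PTHREAD_PROCESS_PRIVATE);
        /* By default each CPU uses its own embedded lock; a scheduler with
         * shared runqueues could instead point several CPUs at one common
         * lock without touching any locker. */
        toy_sched[i].schedule_lock = &toy_sched[i]._lock;
    }
}

/* Mirror of vcpu_schedule_lock(): take the lock of the vcpu's current CPU,
 * then re-check v->processor, since the vcpu may have migrated between
 * reading the field and acquiring the lock. */
static void toy_vcpu_schedule_lock(struct toy_vcpu *v)
{
    int cpu;

    for ( ; ; )
    {
        cpu = v->processor;
        pthread_spin_lock(toy_sched[cpu].schedule_lock);
        if ( v->processor == cpu )
            break;
        pthread_spin_unlock(toy_sched[cpu].schedule_lock);
    }
}

static void toy_vcpu_schedule_unlock(struct toy_vcpu *v)
{
    pthread_spin_unlock(toy_sched[v->processor].schedule_lock);
}

The point of keeping _lock inside the per-CPU structure while always taking schedule_lock through the pointer is that a later scheduler (e.g. credit2 with shared runqueues) can repoint several CPUs at one lock without changing any of the call sites touched above; the retry loop stays correct either way, at worst releasing and immediately re-acquiring the same lock, as the comment in sched-if.h notes.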