[Xen-changelog] Introduce a locking protocol for acquiring the 'scheduler lock' on a particular VCPU
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID c1840ac1f05d32fb6c4b39253b53c301167c2e75
# Parent  0c5980d0bf20f3806a3aaaeb0e0bf87ac1671f38
Introduce a locking protocol for acquiring the 'scheduler lock' on a
particular VCPU. Since this requires acquiring the appropriate per-CPU
lock, we must re-check the VCPU's current CPU binding after the lock is
acquired.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r 0c5980d0bf20 -r c1840ac1f05d xen/common/sched_bvt.c
--- a/xen/common/sched_bvt.c    Fri Jan 13 15:27:45 2006
+++ b/xen/common/sched_bvt.c    Fri Jan 13 15:44:04 2006
@@ -98,9 +98,9 @@
 static void warp_timer_fn(void *data)
 {
     struct bvt_dom_info *inf = data;
-    unsigned int cpu = inf->domain->vcpu[0]->processor;
-
-    spin_lock_irq(&schedule_data[cpu].schedule_lock);
+    struct vcpu *v = inf->domain->vcpu[0];
+
+    vcpu_schedule_lock_irq(v);
 
     inf->warp = 0;
 
@@ -108,28 +108,28 @@
     if ( inf->warpu == 0 )
     {
         inf->warpback = 0;
-        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+        cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
     }
 
     set_timer(&inf->unwarp_timer, NOW() + inf->warpu);
 
-    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+    vcpu_schedule_unlock_irq(v);
 }
 
 static void unwarp_timer_fn(void *data)
 {
     struct bvt_dom_info *inf = data;
-    unsigned int cpu = inf->domain->vcpu[0]->processor;
-
-    spin_lock_irq(&schedule_data[cpu].schedule_lock);
+    struct vcpu *v = inf->domain->vcpu[0];
+
+    vcpu_schedule_lock_irq(v);
 
     if ( inf->warpback )
     {
         inf->warp = 1;
-        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+        cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
     }
 
-    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+    vcpu_schedule_unlock_irq(v);
 }
 
 static inline u32 calc_avt(struct vcpu *d, s_time_t now)
diff -r 0c5980d0bf20 -r c1840ac1f05d xen/common/schedule.c
--- a/xen/common/schedule.c     Fri Jan 13 15:27:45 2006
+++ b/xen/common/schedule.c     Fri Jan 13 15:44:04 2006
@@ -165,10 +165,10 @@
 {
     unsigned long flags;
 
-    spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_lock_irqsave(v, flags);
     if ( likely(!vcpu_runnable(v)) )
         SCHED_OP(sleep, v);
-    spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_unlock_irqrestore(v, flags);
 
     TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
 }
@@ -187,13 +187,13 @@
 {
     unsigned long flags;
 
-    spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_lock_irqsave(v, flags);
     if ( likely(vcpu_runnable(v)) )
     {
         SCHED_OP(wake, v);
         v->wokenup = NOW();
     }
-    spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_unlock_irqrestore(v, flags);
 
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 }
@@ -324,7 +324,7 @@
     for_each_vcpu ( d, v )
     {
         if ( v == current )
-            spin_lock_irq(&schedule_data[smp_processor_id()].schedule_lock);
+            vcpu_schedule_lock_irq(v);
         else
             vcpu_pause(v);
     }
@@ -336,7 +336,7 @@
     for_each_vcpu ( d, v )
     {
         if ( v == current )
-            spin_unlock_irq(&schedule_data[smp_processor_id()].schedule_lock);
+            vcpu_schedule_unlock_irq(v);
         else
             vcpu_unpause(v);
     }
diff -r 0c5980d0bf20 -r c1840ac1f05d xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h        Fri Jan 13 15:27:45 2006
+++ b/xen/include/xen/sched-if.h        Fri Jan 13 15:44:04 2006
@@ -16,16 +16,47 @@
     struct vcpu        *curr;           /* current task                    */
     struct vcpu        *idle;           /* idle task for this cpu          */
     void               *sched_priv;
-    struct timer      s_timer;         /* scheduling timer                */
+    struct timer        s_timer;        /* scheduling timer                */
     unsigned long       tick;           /* current periodic 'tick'         */
 #ifdef BUCKETS
     u32                 hist[BUCKETS];  /* for scheduler latency histogram */
 #endif
 } __cacheline_aligned;
 
+extern struct schedule_data schedule_data[];
+
+static inline void vcpu_schedule_lock(struct vcpu *v)
+{
+    unsigned int cpu;
+
+    for ( ; ; )
+    {
+        cpu = v->processor;
+        spin_lock(&schedule_data[cpu].schedule_lock);
+        if ( likely(v->processor == cpu) )
+            break;
+        spin_unlock(&schedule_data[cpu].schedule_lock);
+    }
+}
+
+#define vcpu_schedule_lock_irq(v) \
+    do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
+#define vcpu_schedule_lock_irqsave(v, flags) \
+    do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
+
+static inline void vcpu_schedule_unlock(struct vcpu *v)
+{
+    spin_unlock(&schedule_data[v->processor].schedule_lock);
+}
+
+#define vcpu_schedule_unlock_irq(v) \
+    do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
+#define vcpu_schedule_unlock_irqrestore(v, flags) \
+    do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
+
 struct task_slice {
     struct vcpu *task;
-    s_time_t time;
+    s_time_t     time;
 };
 
 struct scheduler {
@@ -48,6 +79,4 @@
     void         (*dump_cpu_state) (int);
 };
 
-extern struct schedule_data schedule_data[];
-
 #endif /* __XEN_SCHED_IF_H__ */
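The race the re-check in vcpu_schedule_lock() closes is easiest to see in
a standalone sketch: between reading v->processor and acquiring that CPU's
lock, the VCPU may be migrated to another CPU, leaving the caller holding
the wrong lock. The toy program below mimics the same snapshot / lock /
re-check / retry protocol using pthread mutexes in place of Xen spinlocks;
the names (toy_vcpu, per_cpu_lock, NR_CPUS) are illustrative only and are
not part of the patch.

    /* Toy analogue of the vcpu_schedule_lock() protocol above.
     * Build with: gcc -pthread toy_lock.c */
    #include <pthread.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static pthread_mutex_t per_cpu_lock[NR_CPUS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };

    struct toy_vcpu {
        volatile unsigned int processor; /* may be rewritten by a migrator */
    };

    /* Acquire the per-CPU lock that currently "owns" v. The binding can
     * change while we wait on the lock, so re-check after acquisition and
     * retry if we ended up holding the wrong CPU's lock. */
    static void toy_vcpu_schedule_lock(struct toy_vcpu *v)
    {
        unsigned int cpu;

        for ( ; ; )
        {
            cpu = v->processor;                       /* snapshot binding */
            pthread_mutex_lock(&per_cpu_lock[cpu]);
            if ( v->processor == cpu )                /* still bound here? */
                break;                                /* yes: lock is valid */
            pthread_mutex_unlock(&per_cpu_lock[cpu]); /* no: raced, retry */
        }
    }

    static void toy_vcpu_schedule_unlock(struct toy_vcpu *v)
    {
        pthread_mutex_unlock(&per_cpu_lock[v->processor]);
    }

    int main(void)
    {
        struct toy_vcpu v = { .processor = 0 };

        toy_vcpu_schedule_lock(&v);
        printf("locked vcpu on cpu %u\n", v.processor);
        toy_vcpu_schedule_unlock(&v);
        return 0;
    }

The protocol is sound provided any writer of v->processor holds the old
CPU's schedule_lock while changing it: once the caller holds the lock and
the binding still matches its snapshot, the binding cannot change again
until the lock is released.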