[Xen-changelog] [xen-3.2-testing] evtchn: Avoid spurious event-channel notifications across unbind/bind.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1217499863 -3600
# Node ID a4bd5dee83e024070abcd1e7b28e1eef5adfa14f
# Parent 7651b1d93492647dad389cbbd448b17c7c4d13ca
evtchn: Avoid spurious event-channel notifications across unbind/bind.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
Signed-off-by: Huacai Chen <huacai.chen@xxxxxxxxx>
xen-unstable changeset: 18182:482c16b55c28cd8dfc7b6dc93d0987f0b7bed42d
xen-unstable date: Thu Jul 31 11:13:30 2008 +0100
---
 xen/arch/ia64/xen/irq.c    |  3 +--
 xen/arch/x86/irq.c         |  3 +--
 xen/common/domain.c        |  2 ++
 xen/common/event_channel.c | 37 ++++++++++++++++++++++++++++++-------
 xen/include/xen/irq.h      |  2 +-
 xen/include/xen/sched.h    |  2 ++
 xen/include/xen/spinlock.h |  4 ++--
 7 files changed, 39 insertions(+), 14 deletions(-)

diff -r 7651b1d93492 -r a4bd5dee83e0 xen/arch/ia64/xen/irq.c
--- a/xen/arch/ia64/xen/irq.c	Wed Jul 30 15:25:59 2008 +0100
+++ b/xen/arch/ia64/xen/irq.c	Thu Jul 31 11:24:23 2008 +0100
@@ -459,7 +459,7 @@ int pirq_guest_bind(struct vcpu *v, int
     return rc;
 }

-int pirq_guest_unbind(struct domain *d, int irq)
+void pirq_guest_unbind(struct domain *d, int irq)
 {
     irq_desc_t *desc = &irq_desc[irq];
     irq_guest_action_t *action;
@@ -493,7 +493,6 @@ int pirq_guest_unbind(struct domain *d,
     }

     spin_unlock_irqrestore(&desc->lock, flags);
-    return 0;
 }

 void
diff -r 7651b1d93492 -r a4bd5dee83e0 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c	Wed Jul 30 15:25:59 2008 +0100
+++ b/xen/arch/x86/irq.c	Thu Jul 31 11:24:23 2008 +0100
@@ -537,7 +537,7 @@ int pirq_guest_bind(struct vcpu *v, int
     return rc;
 }

-int pirq_guest_unbind(struct domain *d, int irq)
+void pirq_guest_unbind(struct domain *d, int irq)
 {
     unsigned int vector = irq_to_vector(irq);
     irq_desc_t *desc = &irq_desc[vector];
@@ -620,7 +620,6 @@ int pirq_guest_unbind(struct domain *d,

  out:
     spin_unlock_irqrestore(&desc->lock, flags);
-    return 0;
 }

 extern void dump_ioapic_irq_info(void);
diff -r 7651b1d93492 -r a4bd5dee83e0 xen/common/domain.c
--- a/xen/common/domain.c	Wed Jul 30 15:25:59 2008 +0100
+++ b/xen/common/domain.c	Thu Jul 31 11:24:23 2008 +0100
@@ -147,6 +147,8 @@ struct vcpu *alloc_vcpu(
     v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
     v->runstate.state_entry_time = NOW();

+    spin_lock_init(&v->virq_lock);
+
     if ( !is_idle_domain(d) )
     {
         set_bit(_VPF_down, &v->pause_flags);
diff -r 7651b1d93492 -r a4bd5dee83e0 xen/common/event_channel.c
--- a/xen/common/event_channel.c	Wed Jul 30 15:25:59 2008 +0100
+++ b/xen/common/event_channel.c	Thu Jul 31 11:24:23 2008 +0100
@@ -381,14 +381,18 @@ static long __evtchn_close(struct domain
         break;

     case ECS_PIRQ:
-        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
-            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
+        pirq_guest_unbind(d1, chn1->u.pirq);
+        d1->pirq_to_evtchn[chn1->u.pirq] = 0;
         break;

     case ECS_VIRQ:
         for_each_vcpu ( d1, v )
-            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
-                v->virq_to_evtchn[chn1->u.virq] = 0;
+        {
+            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
+                continue;
+            v->virq_to_evtchn[chn1->u.virq] = 0;
+            spin_barrier(&v->virq_lock);
+        }
         break;

     case ECS_IPI:
@@ -442,6 +446,9 @@ static long __evtchn_close(struct domain
         BUG();
     }

+    /* Clear pending event to avoid unexpected behavior on re-bind. */
+    clear_bit(port1, shared_info_addr(d1, evtchn_pending));
+
     /* Reset binding to vcpu0 when the channel is freed. */
     chn1->state = ECS_FREE;
     chn1->notify_vcpu_id = 0;
@@ -571,20 +578,27 @@ void evtchn_set_pending(struct vcpu *v,

 void send_guest_vcpu_virq(struct vcpu *v, int virq)
-{
+{
+    unsigned long flags;
     int port;

     ASSERT(!virq_is_global(virq));
+
+    spin_lock_irqsave(&v->virq_lock, flags);

     port = v->virq_to_evtchn[virq];
     if ( unlikely(port == 0) )
-        return;
+        goto out;

     evtchn_set_pending(v, port);
+
+ out:
+    spin_unlock_irqrestore(&v->virq_lock, flags);
 }


 void send_guest_global_virq(struct domain *d, int virq)
 {
+    unsigned long flags;
     int port;
     struct vcpu *v;
     struct evtchn *chn;
@@ -598,12 +612,17 @@ void send_guest_global_virq(struct domai
     if ( unlikely(v == NULL) )
         return;

+    spin_lock_irqsave(&v->virq_lock, flags);
+
     port = v->virq_to_evtchn[virq];
     if ( unlikely(port == 0) )
-        return;
+        goto out;

     chn = evtchn_from_port(d, port);
     evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
+
+ out:
+    spin_unlock_irqrestore(&v->virq_lock, flags);
 }


@@ -612,6 +631,10 @@ void send_guest_pirq(struct domain *d, i
     int port = d->pirq_to_evtchn[pirq];
     struct evtchn *chn;

+    /*
+     * It should not be possible to race with __evtchn_close():
+     * The caller of this function must synchronise with pirq_guest_unbind().
+     */
     ASSERT(port != 0);

     chn = evtchn_from_port(d, port);
diff -r 7651b1d93492 -r a4bd5dee83e0 xen/include/xen/irq.h
--- a/xen/include/xen/irq.h	Wed Jul 30 15:25:59 2008 +0100
+++ b/xen/include/xen/irq.h	Thu Jul 31 11:24:23 2008 +0100
@@ -76,7 +76,7 @@ extern int pirq_guest_eoi(struct domain
 extern int pirq_guest_eoi(struct domain *d, int irq);
 extern int pirq_guest_unmask(struct domain *d);
 extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
-extern int pirq_guest_unbind(struct domain *d, int irq);
+extern void pirq_guest_unbind(struct domain *d, int irq);

 static inline void set_native_irq_info(int irq, cpumask_t mask)
 {
diff -r 7651b1d93492 -r a4bd5dee83e0 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h	Wed Jul 30 15:25:59 2008 +0100
+++ b/xen/include/xen/sched.h	Thu Jul 31 11:24:23 2008 +0100
@@ -128,7 +128,9 @@ struct vcpu
     unsigned long pause_flags;
     atomic_t pause_count;

+    /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
     u16 virq_to_evtchn[NR_VIRQS];
+    spinlock_t virq_lock;

     /* Bitmask of CPUs on which this VCPU may run. */
     cpumask_t cpu_affinity;
diff -r 7651b1d93492 -r a4bd5dee83e0 xen/include/xen/spinlock.h
--- a/xen/include/xen/spinlock.h	Wed Jul 30 15:25:59 2008 +0100
+++ b/xen/include/xen/spinlock.h	Thu Jul 31 11:24:23 2008 +0100
@@ -85,8 +85,8 @@ typedef struct { int gcc_is_buggy; } rwl
 /* Ensure a lock is quiescent between two critical operations. */
 static inline void spin_barrier(spinlock_t *lock)
 {
-    spin_lock(lock);
-    spin_unlock(lock);
+    do { mb(); } while ( spin_is_locked(lock) );
+    mb();
 }

 #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
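For readers following the change, below is a minimal user-space sketch of the idiom the patch introduces. It is not Xen code: POSIX spinlocks stand in for Xen's spinlock_t, and vcpu_sim, send_virq and close_virq are hypothetical names. The point it illustrates is that senders look up the virq-to-port mapping under a per-vcpu lock, while the close path clears the mapping and then waits for the lock to go quiescent, so no in-flight sender can raise an event on a port that is about to be freed and possibly re-bound.

/*
 * Sketch only, not Xen code. Compile with: cc sketch.c -lpthread
 */
#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdio.h>

#define NR_VIRQS 8

struct vcpu_sim {
    pthread_spinlock_t virq_lock;
    int virq_to_evtchn[NR_VIRQS];   /* 0 means "virq not bound" */
};

/* Sender side (cf. send_guest_vcpu_virq): deliver only while the binding is live. */
static void send_virq(struct vcpu_sim *v, int virq)
{
    pthread_spin_lock(&v->virq_lock);
    int port = v->virq_to_evtchn[virq];
    if (port != 0)
        printf("set pending bit for port %d\n", port);  /* stands in for evtchn_set_pending() */
    else
        printf("virq %d not bound, notification dropped\n", virq);
    pthread_spin_unlock(&v->virq_lock);
}

/* Close side (cf. the ECS_VIRQ case in __evtchn_close): clear the binding,
 * then drain any sender that may already have read the old mapping.
 * Acquiring and releasing the lock once is the simplest form of
 * spin_barrier(); the patch instead polls spin_is_locked() with memory
 * barriers, so the closer never takes the lock itself. */
static void close_virq(struct vcpu_sim *v, int virq)
{
    v->virq_to_evtchn[virq] = 0;
    pthread_spin_lock(&v->virq_lock);    /* spin_barrier(&v->virq_lock) */
    pthread_spin_unlock(&v->virq_lock);
    /* From here on the port can be reused without a spurious notification. */
}

int main(void)
{
    struct vcpu_sim v = { .virq_to_evtchn = { [1] = 5 } };
    pthread_spin_init(&v.virq_lock, PTHREAD_PROCESS_PRIVATE);

    send_virq(&v, 1);   /* delivered: port 5 */
    close_virq(&v, 1);
    send_virq(&v, 1);   /* dropped: binding cleared before re-bind */

    pthread_spin_destroy(&v.virq_lock);
    return 0;
}

The patch pairs this drain with clearing the port's pending bit in __evtchn_close(), so a subsequent bind of the same port does not observe a stale notification; the spin_barrier() rewrite in xen/include/xen/spinlock.h is simply a lock-free way of performing the same wait.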