This lock is only intended to prevent racing with closing an event channel -
evtchn_set_pending() doesn't require external serialization.

Signed-off-by: Jan Beulich

--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -147,7 +147,7 @@ struct vcpu *alloc_vcpu(
     v->domain = d;
     v->vcpu_id = vcpu_id;
 
-    spin_lock_init(&v->virq_lock);
+    rwlock_init(&v->virq_lock);
 
     tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);
 
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -417,7 +417,7 @@ static long __evtchn_close(struct domain
             if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
                 continue;
             v->virq_to_evtchn[chn1->u.virq] = 0;
-            spin_barrier_irq(&v->virq_lock);
+            write_barrier_irq(&v->virq_lock);
         }
         break;
 
@@ -613,12 +613,11 @@ int guest_enabled_event(struct vcpu *v,
 
 void send_guest_vcpu_virq(struct vcpu *v, int virq)
 {
-    unsigned long flags;
     int port;
 
     ASSERT(!virq_is_global(virq));
 
-    spin_lock_irqsave(&v->virq_lock, flags);
+    read_lock(&v->virq_lock);
 
     port = v->virq_to_evtchn[virq];
     if ( unlikely(port == 0) )
@@ -627,12 +626,11 @@ void send_guest_vcpu_virq(struct vcpu *v
         evtchn_set_pending(v, port);
 
  out:
-    spin_unlock_irqrestore(&v->virq_lock, flags);
+    read_unlock(&v->virq_lock);
 }
 
 void send_guest_global_virq(struct domain *d, int virq)
 {
-    unsigned long flags;
     int port;
     struct vcpu *v;
     struct evtchn *chn;
@@ -646,7 +644,7 @@ void send_guest_global_virq(struct domai
     if ( unlikely(v == NULL) )
         return;
 
-    spin_lock_irqsave(&v->virq_lock, flags);
+    read_lock(&v->virq_lock);
 
     port = v->virq_to_evtchn[virq];
     if ( unlikely(port == 0) )
@@ -656,7 +654,7 @@ void send_guest_global_virq(struct domai
         evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
 
  out:
-    spin_unlock_irqrestore(&v->virq_lock, flags);
+    read_unlock(&v->virq_lock);
 }
 
 int send_guest_pirq(struct domain *d, int pirq)
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -344,6 +344,20 @@ int _rw_is_write_locked(rwlock_t *lock)
     return _raw_rw_is_write_locked(&lock->raw);
 }
 
+void _write_barrier(rwlock_t *lock)
+{
+    check_lock(&lock->debug);
+    do { mb(); } while ( _raw_rw_is_locked(&lock->raw) );
+}
+
+void _write_barrier_irq(rwlock_t *lock)
+{
+    unsigned long flags;
+    local_irq_save(flags);
+    _write_barrier(lock);
+    local_irq_restore(flags);
+}
+
 #ifdef LOCK_PROFILE
 
 struct lock_profile_anc {
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -157,9 +157,12 @@ struct vcpu
     unsigned long    pause_flags;
     atomic_t         pause_count;
 
-    /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
+    /*
+     * Writer-side-IRQ-safe virq_lock protects against delivering VIRQ
+     * to stale evtchn.
+     */
     u16              virq_to_evtchn[NR_VIRQS];
-    spinlock_t       virq_lock;
+    rwlock_t         virq_lock;
 
     /* Bitmask of CPUs on which this VCPU may run. */
     cpumask_t        cpu_affinity;
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -170,6 +170,9 @@ void _write_unlock_irqrestore(rwlock_t *
 int _rw_is_locked(rwlock_t *lock);
 int _rw_is_write_locked(rwlock_t *lock);
 
+void _write_barrier(rwlock_t *lock);
+void _write_barrier_irq(rwlock_t *lock);
+
 #define spin_lock(l)                  _spin_lock(l)
 #define spin_lock_irq(l)              _spin_lock_irq(l)
 #define spin_lock_irqsave(l, f)       ((f) = _spin_lock_irqsave(l))
@@ -223,4 +226,7 @@ int _rw_is_write_locked(rwlock_t *lock);
 #define rw_is_locked(l)               _rw_is_locked(l)
 #define rw_is_write_locked(l)         _rw_is_write_locked(l)
 
+#define write_barrier(l)              _write_barrier(l)
+#define write_barrier_irq(l)          _write_barrier_irq(l)
+
 #endif /* __SPINLOCK_H__ */
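
For reference, below is a minimal user-space sketch (not part of the patch, and
deliberately simplified: the read side of the rwlock is emulated with an atomic
reader count, and the demo is single-threaded) of the protocol the change relies
on. Virq senders only need to observe a consistent virq-to-evtchn binding, so
they enter a read-side critical section; the close path clears the binding and
then merely waits for in-flight readers to drain instead of taking the lock for
writing. Names such as virq_to_evtchn mirror the Xen fields, but every helper
here is illustrative, not Xen code.

/* Build with e.g. gcc -std=c11 sketch.c */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int readers;            /* in-flight virq senders */
static atomic_int virq_to_evtchn[8];  /* 0 == no event channel bound */

/* Sender side: stands in for read_lock(&v->virq_lock) in the patch. */
static void send_virq(int virq)
{
    atomic_fetch_add(&readers, 1);

    int port = atomic_load(&virq_to_evtchn[virq]);
    if ( port != 0 )
        printf("deliver VIRQ %d to evtchn %d\n", virq, port);

    atomic_fetch_sub(&readers, 1);
}

/*
 * Close side: clear the binding, then wait for readers to drain,
 * standing in for write_barrier_irq(&v->virq_lock).
 */
static void close_evtchn(int virq)
{
    atomic_store(&virq_to_evtchn[virq], 0);
    while ( atomic_load(&readers) != 0 )
        ;  /* any sender that saw the old port is still in flight */
}

int main(void)
{
    atomic_store(&virq_to_evtchn[3], 42);
    send_virq(3);       /* delivers to port 42 */
    close_evtchn(3);    /* after this, no sender can use port 42 */
    send_virq(3);       /* sees port 0, delivers nothing */
    return 0;
}

In the patch itself the read side is read_lock(&v->virq_lock) with interrupts
left enabled (hence the comment change to "writer-side-IRQ-safe"), and the
drain step is the new _write_barrier_irq(), which disables interrupts and spins
until _raw_rw_is_locked() reports the lock free - the same shape as the
spin_barrier_irq() it replaces, but now compatible with readers that never take
the lock exclusively.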