[PATCH v2 6/8] evtchn: convert vIRQ lock to an r/w one
There's no need to serialize all sending of vIRQ-s; all that's needed is
serialization against the closing of the respective event channels (so
far by means of a barrier). To facilitate the conversion, switch to an
ordinary write-locked region in evtchn_close().

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Don't introduce/use rw_barrier() here. Add comment to
    evtchn_bind_virq(). Re-base.

--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -160,7 +160,7 @@ struct vcpu *vcpu_create(struct domain *
     v->vcpu_id = vcpu_id;
     v->dirty_cpu = VCPU_CPU_CLEAN;
 
-    spin_lock_init(&v->virq_lock);
+    rwlock_init(&v->virq_lock);
 
     tasklet_init(&v->continue_hypercall_tasklet, NULL, NULL);
 
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -449,6 +449,13 @@ int evtchn_bind_virq(evtchn_bind_virq_t
 
     spin_unlock_irqrestore(&chn->lock, flags);
 
+    /*
+     * If anything, the update of virq_to_evtchn[] would need guarding by
+     * virq_lock, but since this is the last action here, there's no strict
+     * need to acquire the lock. Hence holding event_lock isn't helpful
+     * anymore at this point, but utilize that its unlocking acts as the
+     * otherwise necessary smp_wmb() here.
+     */
     v->virq_to_evtchn[virq] = bind->port = port;
 
  out:
@@ -638,10 +645,10 @@ int evtchn_close(struct domain *d1, int
     case ECS_VIRQ:
         for_each_vcpu ( d1, v )
         {
-            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
-                continue;
-            v->virq_to_evtchn[chn1->u.virq] = 0;
-            spin_barrier(&v->virq_lock);
+            write_lock_irqsave(&v->virq_lock, flags);
+            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
+                v->virq_to_evtchn[chn1->u.virq] = 0;
+            write_unlock_irqrestore(&v->virq_lock, flags);
         }
         break;
 
@@ -797,7 +804,7 @@ void send_guest_vcpu_virq(struct vcpu *v
 
     ASSERT(!virq_is_global(virq));
 
-    spin_lock_irqsave(&v->virq_lock, flags);
+    read_lock_irqsave(&v->virq_lock, flags);
 
     port = v->virq_to_evtchn[virq];
     if ( unlikely(port == 0) )
@@ -807,7 +814,7 @@ void send_guest_vcpu_virq(struct vcpu *v
     evtchn_port_set_pending(d, v->vcpu_id, evtchn_from_port(d, port));
 
  out:
-    spin_unlock_irqrestore(&v->virq_lock, flags);
+    read_unlock_irqrestore(&v->virq_lock, flags);
 }
 
 void send_guest_global_virq(struct domain *d, uint32_t virq)
@@ -826,7 +833,7 @@ void send_guest_global_virq(struct domai
     if ( unlikely(v == NULL) )
         return;
 
-    spin_lock_irqsave(&v->virq_lock, flags);
+    read_lock_irqsave(&v->virq_lock, flags);
 
     port = v->virq_to_evtchn[virq];
     if ( unlikely(port == 0) )
@@ -836,7 +843,7 @@ void send_guest_global_virq(struct domai
     evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
 
  out:
-    spin_unlock_irqrestore(&v->virq_lock, flags);
+    read_unlock_irqrestore(&v->virq_lock, flags);
 }
 
 void send_guest_pirq(struct domain *d, const struct pirq *pirq)
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -235,7 +235,7 @@ struct vcpu
 
     /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
     evtchn_port_t virq_to_evtchn[NR_VIRQS];
-    spinlock_t virq_lock;
+    rwlock_t virq_lock;
 
     /* Tasklet for continue_hypercall_on_cpu(). */
     struct tasklet continue_hypercall_tasklet;
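
[Editor's note] To make the before/after difference concrete, here is a
minimal, compilable user-space sketch of the scheme the patch switches to.
It is not Xen code: a pthreads rwlock stands in for Xen's rwlock_t, and the
helper names and port numbers below are made up for illustration. Senders
take the lock for reading and can therefore run concurrently; closing takes
it for writing, which both publishes the cleared entry and waits out
in-flight senders -- the job the old spin_barrier() did.

/* Minimal user-space sketch (assumption: pthreads rwlock stands in for
 * Xen's rwlock_t; send_virq()/close_virq() and port 42 are illustrative). */
#include <pthread.h>
#include <stdio.h>

#define NR_VIRQS 4

static pthread_rwlock_t virq_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int virq_to_evtchn[NR_VIRQS];

/* Sender path: read-locked, so any number of senders may run in
 * parallel, mirroring send_guest_vcpu_virq() after the patch. */
static void send_virq(unsigned int virq)
{
    pthread_rwlock_rdlock(&virq_lock);

    if ( virq_to_evtchn[virq] )
        printf("deliver vIRQ %u via port %u\n", virq, virq_to_evtchn[virq]);

    pthread_rwlock_unlock(&virq_lock);
}

/* Close path: write-locked, i.e. exclusive.  Acquiring the write lock
 * waits for all in-flight readers to drop the lock, which is what the
 * old spin_barrier() achieved. */
static void close_virq(unsigned int virq, unsigned int port)
{
    pthread_rwlock_wrlock(&virq_lock);

    if ( virq_to_evtchn[virq] == port )
        virq_to_evtchn[virq] = 0;

    pthread_rwlock_unlock(&virq_lock);
}

int main(void)
{
    virq_to_evtchn[1] = 42;   /* bind (made-up port number) */
    send_virq(1);             /* delivers */
    close_virq(1, 42);        /* tears the binding down */
    send_virq(1);             /* now a no-op: the entry is 0 */
    return 0;
}

The pay-off is on the sender side: with the previous spinlock, two vCPUs
raising vIRQs for the same target serialized against each other even though
neither modifies virq_to_evtchn[]; with the r/w lock only the (rare) close
path is exclusive.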