
Re: [Xen-devel] [PATCH] convert vcpu's virq_lock to rwlock


  • To: Jan Beulich <JBeulich@xxxxxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
  • From: Keir Fraser <keir@xxxxxxx>
  • Date: Sat, 26 Mar 2011 09:29:00 +0000
  • Cc:
  • Delivery-date: Sat, 26 Mar 2011 02:30:03 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>
  • Thread-index: AcvrmDzuGqwuOMyQGkK57H18rKxkWw==
  • Thread-topic: [Xen-devel] [PATCH] convert vcpu's virq_lock to rwlock

On 25/03/2011 16:53, "Jan Beulich" <JBeulich@xxxxxxxxxx> wrote:

> This lock is only intended to prevent racing with closing an event
> channel - evtchn_set_pending() doesn't require external serialization.
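
For context, the pattern the patch implements (simplified from the hunks
quoted below) is: the send path only reads the VIRQ-to-evtchn mapping under
the lock, while the close path retires the mapping and then waits out any
in-flight sender.

    /* Send side (send_guest_vcpu_virq, simplified): */
    read_lock(&v->virq_lock);
    port = v->virq_to_evtchn[virq];
    if ( port != 0 )
        evtchn_set_pending(v, port);    /* needs no further serialization */
    read_unlock(&v->virq_lock);

    /* Close side (__evtchn_close, simplified): */
    v->virq_to_evtchn[virq] = 0;
    write_barrier_irq(&v->virq_lock);   /* drain any sender still in the section */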

It's still a small critical section, so I doubt the serialisation matters
compared with the cost of an extra LOCKed operation on read_unlock(),
especially since the lock is per-vcpu anyway and the only at-all-common
VIRQ is VIRQ_TIMER. The probability of unnecessary serialisation being a
bottleneck here is basically zero.
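
To illustrate the cost point, here is a toy sketch (not Xen's actual rwlock;
writer exclusion is omitted): releasing a reader-writer lock needs a second
LOCKed read-modify-write because other readers may race on the count, whereas
releasing a spinlock can be a plain store on x86.

    /* Toy reader count -- NOT Xen's implementation, writers omitted. */
    typedef struct { volatile int readers; } toy_rwlock_t;

    static inline void toy_read_lock(toy_rwlock_t *l)
    {
        __sync_fetch_and_add(&l->readers, 1);   /* LOCKed RMW #1 */
    }

    static inline void toy_read_unlock(toy_rwlock_t *l)
    {
        __sync_fetch_and_sub(&l->readers, 1);   /* LOCKed RMW #2 */
    }

    /* Toy spinlock release: no LOCKed instruction needed on x86. */
    typedef struct { volatile int held; } toy_spinlock_t;

    static inline void toy_spin_unlock(toy_spinlock_t *l)
    {
        __asm__ __volatile__ ( "" : : : "memory" );  /* compiler barrier only */
        l->held = 0;
    }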

We could get rid of the local_irq_save/restore operations, though at the
cost of changing the lock debug checks in spinlock.c: the send_*_virq
functions can be called with IRQs both disabled and enabled, so as-is the
patch could make us bug out on those checks. But is that worth it overall?
I doubt the EFLAGS.IF fiddling compares unfavourably with an extra LOCKed
operation, cost-wise.
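
For reference, the debug check in question has roughly the following shape
(a sketch of check_lock() in xen/common/spinlock.c; field names are
approximate and the real code handles the update race with cmpxchg): the
first acquisition records whether IRQs were disabled, and any later
acquisition in the opposite state BUG()s. Taking v->virq_lock with a plain
read_lock() from both IRQs-on and IRQs-off callers would trip exactly this.

    struct lock_debug { int irq_safe; /* -1 = unknown, 0 = IRQs on, 1 = IRQs off */ };

    static void check_lock_sketch(struct lock_debug *debug)
    {
        int irq_safe = !local_irq_is_enabled();

        /* First acquisition decides the expected IRQ state... */
        if ( debug->irq_safe == -1 )
            debug->irq_safe = irq_safe;

        /* ...and any caller in the opposite state is a bug. */
        BUG_ON(debug->irq_safe != irq_safe);
    }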

So I'm not even slightly convinced; I'll leave it as is.

 -- Keir

> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
> 
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -147,7 +147,7 @@ struct vcpu *alloc_vcpu(
>      v->domain = d;
>      v->vcpu_id = vcpu_id;
>  
> -    spin_lock_init(&v->virq_lock);
> +    rwlock_init(&v->virq_lock);
>  
>      tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);
>  
> --- a/xen/common/event_channel.c
> +++ b/xen/common/event_channel.c
> @@ -417,7 +417,7 @@ static long __evtchn_close(struct domain
>              if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
>                  continue;
>              v->virq_to_evtchn[chn1->u.virq] = 0;
> -            spin_barrier_irq(&v->virq_lock);
> +            write_barrier_irq(&v->virq_lock);
>          }
>          break;
>  
> @@ -613,12 +613,11 @@ int guest_enabled_event(struct vcpu *v,
>  
>  void send_guest_vcpu_virq(struct vcpu *v, int virq)
>  {
> -    unsigned long flags;
>      int port;
>  
>      ASSERT(!virq_is_global(virq));
>  
> -    spin_lock_irqsave(&v->virq_lock, flags);
> +    read_lock(&v->virq_lock);
>  
>      port = v->virq_to_evtchn[virq];
>      if ( unlikely(port == 0) )
> @@ -627,12 +626,11 @@ void send_guest_vcpu_virq(struct vcpu *v
>      evtchn_set_pending(v, port);
>  
>   out:
> -    spin_unlock_irqrestore(&v->virq_lock, flags);
> +    read_unlock(&v->virq_lock);
>  }
>  
>  void send_guest_global_virq(struct domain *d, int virq)
>  {
> -    unsigned long flags;
>      int port;
>      struct vcpu *v;
>      struct evtchn *chn;
> @@ -646,7 +644,7 @@ void send_guest_global_virq(struct domai
>      if ( unlikely(v == NULL) )
>          return;
>  
> -    spin_lock_irqsave(&v->virq_lock, flags);
> +    read_lock(&v->virq_lock);
>  
>      port = v->virq_to_evtchn[virq];
>      if ( unlikely(port == 0) )
> @@ -656,7 +654,7 @@ void send_guest_global_virq(struct domai
>      evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
>  
>   out:
> -    spin_unlock_irqrestore(&v->virq_lock, flags);
> +    read_unlock(&v->virq_lock);
>  }
>  
>  int send_guest_pirq(struct domain *d, int pirq)
> --- a/xen/common/spinlock.c
> +++ b/xen/common/spinlock.c
> @@ -344,6 +344,20 @@ int _rw_is_write_locked(rwlock_t *lock)
>      return _raw_rw_is_write_locked(&lock->raw);
>  }
>  
> +void _write_barrier(rwlock_t *lock)
> +{
> +    check_lock(&lock->debug);
> +    do { mb(); } while ( _raw_rw_is_locked(&lock->raw) );
> +}
> +
> +void _write_barrier_irq(rwlock_t *lock)
> +{
> +    unsigned long flags;
> +    local_irq_save(flags);
> +    _write_barrier(lock);
> +    local_irq_restore(flags);
> +}
> +
>  #ifdef LOCK_PROFILE
>  
>  struct lock_profile_anc {
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -157,9 +157,12 @@ struct vcpu
>      unsigned long    pause_flags;
>      atomic_t         pause_count;
>  
> -    /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
> +    /*
> +     * Writer-side-IRQ-safe virq_lock protects against delivering VIRQ
> +     * to stale evtchn.
> +     */
>      u16              virq_to_evtchn[NR_VIRQS];
> -    spinlock_t       virq_lock;
> +    rwlock_t         virq_lock;
>  
>      /* Bitmask of CPUs on which this VCPU may run. */
>      cpumask_t        cpu_affinity;
> --- a/xen/include/xen/spinlock.h
> +++ b/xen/include/xen/spinlock.h
> @@ -170,6 +170,9 @@ void _write_unlock_irqrestore(rwlock_t *
>  int _rw_is_locked(rwlock_t *lock);
>  int _rw_is_write_locked(rwlock_t *lock);
>  
> +void _write_barrier(rwlock_t *lock);
> +void _write_barrier_irq(rwlock_t *lock);
> +
>  #define spin_lock(l)                  _spin_lock(l)
>  #define spin_lock_irq(l)              _spin_lock_irq(l)
>  #define spin_lock_irqsave(l, f)       ((f) = _spin_lock_irqsave(l))
> @@ -223,4 +226,7 @@ int _rw_is_write_locked(rwlock_t *lock);
>  #define rw_is_locked(l)               _rw_is_locked(l)
>  #define rw_is_write_locked(l)         _rw_is_write_locked(l)
>  
> +#define write_barrier(l)              _write_barrier(l)
> +#define write_barrier_irq(l)          _write_barrier_irq(l)
> +
>  #endif /* __SPINLOCK_H__ */
> 
> 
> 



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

