
[Xen-changelog] [xen-unstable] rwlock: Allow to scale to 2^31-1 readers on x86.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1301126601 0
# Node ID 612171ff82ea51aaf65d98fd1a551eb8d50fb481
# Parent  c9f745c153ec8c3775e2ee03adc3cb30370b84f6
rwlock: Allow to scale to 2^31-1 readers on x86.

Also rework to match the 'trylock' style of raw function used for
spinlocks.

Inspired by Jan Beulich's patch to do similar improved scaling.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---
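
The x86 encoding at the heart of this change: the lock word is 0 when the lock
is free, each reader atomically decrements it (so up to 2^31-1 concurrent
readers fit before the word could wrap), and a writer claims the whole word by
cmpxchg'ing 0 to RW_WRITE_BIAS (0x7fffffff), leaving it positive so that
_raw_rw_is_write_locked() is simply "lock > 0". The following is a minimal
sketch of that protocol using C11 atomics, for illustration only; the sketch_*
names and <stdatomic.h> are assumptions of the sketch and are not part of the
Xen tree, which uses the inline assembly shown in the diff below.

    /*
     * Illustrative sketch only -- not the Xen implementation.  It restates the
     * lock-word protocol introduced by this changeset with C11 atomics:
     *     0           lock is free
     *    -N           N readers hold the lock (N can reach 2^31 - 1)
     *    0x7fffffff   a writer holds the lock (RW_WRITE_BIAS)
     */
    #include <stdatomic.h>
    #include <stdbool.h>

    #define RW_WRITE_BIAS 0x7fffffff

    typedef struct { atomic_int lock; } sketch_rwlock_t;   /* { 0 } == unlocked */

    static bool sketch_read_trylock(sketch_rwlock_t *rw)
    {
        /* Corresponds to the patch's "lock; decl": readers drive the word negative. */
        int old = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
        if ( old <= 0 )                 /* word was free or read-locked: acquired */
            return true;
        /* Word was positive, i.e. write-locked: undo our decrement and fail. */
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
        return false;
    }

    static bool sketch_write_trylock(sketch_rwlock_t *rw)
    {
        /* Corresponds to cmpxchg(&rw->lock, 0, RW_WRITE_BIAS) in the patch. */
        int expected = 0;
        return atomic_compare_exchange_strong_explicit(
            &rw->lock, &expected, RW_WRITE_BIAS,
            memory_order_acquire, memory_order_relaxed);
    }

    static void sketch_read_unlock(sketch_rwlock_t *rw)
    {
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
    }

    static void sketch_write_unlock(sketch_rwlock_t *rw)
    {
        atomic_fetch_sub_explicit(&rw->lock, RW_WRITE_BIAS, memory_order_release);
    }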


diff -r c9f745c153ec -r 612171ff82ea xen/common/spinlock.c
--- a/xen/common/spinlock.c     Fri Mar 25 21:59:20 2011 +0000
+++ b/xen/common/spinlock.c     Sat Mar 26 08:03:21 2011 +0000
@@ -234,7 +234,11 @@
 void _read_lock(rwlock_t *lock)
 {
     check_lock(&lock->debug);
-    _raw_read_lock(&lock->raw);
+    while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+    {
+        while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
+            cpu_relax();
+    }
     preempt_disable();
 }
 
@@ -243,7 +247,13 @@
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
     check_lock(&lock->debug);
-    _raw_read_lock(&lock->raw);
+    while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+    {
+        local_irq_enable();
+        while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
+            cpu_relax();
+        local_irq_disable();
+    }
     preempt_disable();
 }
 
@@ -252,11 +262,26 @@
     unsigned long flags;
     local_irq_save(flags);
     check_lock(&lock->debug);
-    _raw_read_lock(&lock->raw);
+    while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+    {
+        local_irq_restore(flags);
+        while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
+            cpu_relax();
+        local_irq_save(flags);
+    }
     preempt_disable();
     return flags;
 }
 
+int _read_trylock(rwlock_t *lock)
+{
+    check_lock(&lock->debug);
+    if ( !_raw_read_trylock(&lock->raw) )
+        return 0;
+    preempt_disable();
+    return 1;
+}
+
 void _read_unlock(rwlock_t *lock)
 {
     preempt_enable();
@@ -280,7 +305,11 @@
 void _write_lock(rwlock_t *lock)
 {
     check_lock(&lock->debug);
-    _raw_write_lock(&lock->raw);
+    while ( unlikely(!_raw_write_trylock(&lock->raw)) )
+    {
+        while ( likely(_raw_rw_is_locked(&lock->raw)) )
+            cpu_relax();
+    }
     preempt_disable();
 }
 
@@ -289,7 +318,13 @@
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
     check_lock(&lock->debug);
-    _raw_write_lock(&lock->raw);
+    while ( unlikely(!_raw_write_trylock(&lock->raw)) )
+    {
+        local_irq_enable();
+        while ( likely(_raw_rw_is_locked(&lock->raw)) )
+            cpu_relax();
+        local_irq_disable();
+    }
     preempt_disable();
 }
 
@@ -298,7 +333,13 @@
     unsigned long flags;
     local_irq_save(flags);
     check_lock(&lock->debug);
-    _raw_write_lock(&lock->raw);
+    while ( unlikely(!_raw_write_trylock(&lock->raw)) )
+    {
+        local_irq_restore(flags);
+        while ( likely(_raw_rw_is_locked(&lock->raw)) )
+            cpu_relax();
+        local_irq_save(flags);
+    }
     preempt_disable();
     return flags;
 }
diff -r c9f745c153ec -r 612171ff82ea xen/include/asm-ia64/linux-xen/asm/spinlock.h
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h     Fri Mar 25 21:59:20 2011 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h     Sat Mar 26 08:03:21 2011 +0000
@@ -35,17 +35,6 @@
 } raw_rwlock_t;
 #define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
 
-#define _raw_read_lock(rw)                                                             \
-do {                                                                                   \
-       raw_rwlock_t *__read_lock_ptr = (rw);                                           \
-                                                                                       \
-       while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {          \
-               ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                        \
-               while (*(volatile int *)__read_lock_ptr < 0)                            \
-                       cpu_relax();                                                    \
-       }                                                                               \
-} while (0)
-
 #define _raw_read_unlock(rw)                                   \
 do {                                                           \
        raw_rwlock_t *__read_lock_ptr = (rw);                   \
@@ -53,20 +42,6 @@
 } while (0)
 
 #ifdef ASM_SUPPORTED
-#define _raw_write_lock(rw)                                                    \
-do {                                                                           \
-       __asm__ __volatile__ (                                                  \
-               "mov ar.ccv = r0\n"                                             \
-               "dep r29 = -1, r0, 31, 1;;\n"                                   \
-               "1:\n"                                                          \
-               "ld4 r2 = [%0];;\n"                                             \
-               "cmp4.eq p0,p7 = r0,r2\n"                                       \
-               "(p7) br.cond.spnt.few 1b \n"                                   \
-               "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"                       \
-               "cmp4.eq p0,p7 = r0, r2\n"                                      \
-               "(p7) br.cond.spnt.few 1b;;\n"                                  \
-               :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");            \
-} while(0)
 
 #define _raw_write_trylock(rw)                                                 \
 ({                                                                             \
@@ -82,16 +57,6 @@
 
 #else /* !ASM_SUPPORTED */
 
-#define _raw_write_lock(l)                                                             \
-({                                                                                     \
-       __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);                       \
-       __u32 *ia64_write_lock_ptr = (__u32 *) (l);                                     \
-       do {                                                                            \
-               while (*ia64_write_lock_ptr)                                            \
-                       ia64_barrier();                                                 \
-               ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);     \
-       } while (ia64_val);                                                             \
-})
 
 #define _raw_write_trylock(rw)                                         \
 ({                                                                     \
diff -r c9f745c153ec -r 612171ff82ea xen/include/asm-x86/spinlock.h
--- a/xen/include/asm-x86/spinlock.h    Fri Mar 25 21:59:20 2011 +0000
+++ b/xen/include/asm-x86/spinlock.h    Sat Mar 26 08:03:21 2011 +0000
@@ -35,51 +35,29 @@
     volatile int lock;
 } raw_rwlock_t;
 
-#define RW_LOCK_BIAS 0x01000000
-#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { RW_LOCK_BIAS }
+#define RW_WRITE_BIAS 0x7fffffff
+#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0 }
 
-static always_inline void _raw_read_lock(raw_rwlock_t *rw)
+static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
 {
+    bool_t acquired;
+
     asm volatile (
-        "1:  lock; decl %0         \n"
-        "    jns 3f                \n"
-        "    lock; incl %0         \n"
-        "2:  rep; nop              \n"
-        "    cmpl $1,%0            \n"
-        "    js 2b                 \n"
+        "    lock; decl %0         \n"
+        "    jns 2f                \n"
+        "1:  .subsection 1         \n"
+        "2:  lock; incl %0         \n"
+        "    dec %1                \n"
         "    jmp 1b                \n"
-        "3:"
-        : "=m" (rw->lock) : : "memory" );
-}
+        "    .subsection 0         \n"
+        : "=m" (rw->lock), "=r" (acquired) : "1" (1) : "memory" );
 
-static always_inline void _raw_write_lock(raw_rwlock_t *rw)
-{
-    asm volatile (
-        "1:  lock; subl %1,%0      \n"
-        "    jz 3f                 \n"
-        "    lock; addl %1,%0      \n"
-        "2:  rep; nop              \n"
-        "    cmpl %1,%0            \n"
-        "    jne 2b                \n"
-        "    jmp 1b                \n"
-        "3:"
-        : "=m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory" );
+    return acquired;
 }
 
 static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
 {
-    int rc;
-
-    asm volatile (
-        "    lock; subl %2,%0      \n"
-        "    jz 1f                 \n"
-        "    lock; addl %2,%0      \n"
-        "    dec %1                \n"
-        "1:"
-        : "=m" (rw->lock), "=r" (rc) : "i" (RW_LOCK_BIAS), "1" (1)
-        : "memory" );
-
-    return rc;
+    return (cmpxchg(&rw->lock, 0, RW_WRITE_BIAS) == 0);
 }
 
 static always_inline void _raw_read_unlock(raw_rwlock_t *rw)
@@ -92,11 +70,11 @@
 static always_inline void _raw_write_unlock(raw_rwlock_t *rw)
 {
     asm volatile (
-        "lock ; addl %1,%0"
-        : "=m" ((rw)->lock) : "i" (RW_LOCK_BIAS) : "memory" );
+        "lock ; subl %1,%0"
+        : "=m" ((rw)->lock) : "i" (RW_WRITE_BIAS) : "memory" );
 }
 
-#define _raw_rw_is_locked(x) ((x)->lock < RW_LOCK_BIAS)
-#define _raw_rw_is_write_locked(x) ((x)->lock <= 0)
+#define _raw_rw_is_locked(x) ((x)->lock != 0)
+#define _raw_rw_is_write_locked(x) ((x)->lock > 0)
 
 #endif /* __ASM_SPINLOCK_H */
diff -r c9f745c153ec -r 612171ff82ea xen/include/xen/spinlock.h
--- a/xen/include/xen/spinlock.h        Fri Mar 25 21:59:20 2011 +0000
+++ b/xen/include/xen/spinlock.h        Sat Mar 26 08:03:21 2011 +0000
@@ -157,6 +157,7 @@
 void _read_unlock(rwlock_t *lock);
 void _read_unlock_irq(rwlock_t *lock);
 void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
+int _read_trylock(rwlock_t *lock);
 
 void _write_lock(rwlock_t *lock);
 void _write_lock_irq(rwlock_t *lock);
@@ -210,6 +211,7 @@
 #define read_unlock(l)                _read_unlock(l)
 #define read_unlock_irq(l)            _read_unlock_irq(l)
 #define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
+#define read_trylock(l)               _read_trylock(l)
 
 #define write_lock(l)                 _write_lock(l)
 #define write_lock_irq(l)             _write_lock_irq(l)
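
For reference, a hypothetical caller of the newly exported read_trylock()
wrapper; try_sample() and its parameters are invented purely to illustrate the
intended usage and are not part of this changeset:

    /* Hypothetical usage sketch -- not from the Xen tree. */
    static int try_sample(rwlock_t *lock, const int *shared_value, int *out)
    {
        if ( !read_trylock(lock) )
            return 0;            /* a writer holds the lock; caller may retry later */
        *out = *shared_value;
        read_unlock(lock);
        return 1;
    }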

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog