
[Xen-changelog] [xen-unstable] Clean up spinlock operations and compile as first-class functions.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1224517697 -3600
# Node ID c003e5a23a4e1afe35f1b547575fcc7734bf5ea6
# Parent  824892134573b05573ca1945757bb2a99e220a02
Clean up spinlock operations and compile as first-class functions.

This follows modern Linux, since outlining spinlock operations
apparently does not slow down execution. The cleanups will also allow
diagnostic code to be added more conveniently.
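In outline, the change converts the spin_lock() family from macros that
expanded the arch-specific raw operations inline at every call site into
thin macros onto real functions compiled once in xen/common/spinlock.c.
A minimal before/after sketch, using only names that appear in the patch
below:

    /* Before: every caller inlines the arch-specific raw op. */
    #define spin_lock(l)    _raw_spin_lock(l)

    /* After: callers go through one first-class function, which
     * operates on the raw lock embedded in the new common spinlock_t. */
    #define spin_lock(l)    _spin_lock(l)

    void _spin_lock(spinlock_t *lock)
    {
        _raw_spin_lock(&lock->raw);
    }

The arch headers now define only the raw_spinlock_t/raw_rwlock_t types
and raw operations; the common spinlock_t wraps a raw_spinlock_t
together with the recursion bookkeeping (recurse_cpu, recurse_cnt).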

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/include/asm-ia64/xenspinlock.h            |   30 -----
 xen/arch/x86/x86_64/mm.c                      |    2 
 xen/common/Makefile                           |    1 
 xen/common/spinlock.c                         |  147 ++++++++++++++++++++++++
 xen/include/asm-ia64/linux-xen/asm/spinlock.h |   47 ++-----
 xen/include/asm-x86/spinlock.h                |   80 ++++---------
 xen/include/xen/spinlock.h                    |  156 +++++++++++++-------------
 7 files changed, 268 insertions(+), 195 deletions(-)

diff -r 824892134573 -r c003e5a23a4e xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/arch/x86/x86_64/mm.c  Mon Oct 20 16:48:17 2008 +0100
@@ -252,8 +252,6 @@ void __init subarch_init_memory(void)
     BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) != 
                  (offsetof(struct page_info, count_info) + sizeof(u32)));
     BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
-    BUILD_BUG_ON(sizeof(struct page_info) !=
-                 (32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long)));
 
     /* M2P table is mappable read-only by privileged domains. */
     for ( v  = RDWR_MPT_VIRT_START;
diff -r 824892134573 -r c003e5a23a4e xen/common/Makefile
--- a/xen/common/Makefile       Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/common/Makefile       Mon Oct 20 16:48:17 2008 +0100
@@ -16,6 +16,7 @@ obj-y += schedule.o
 obj-y += schedule.o
 obj-y += shutdown.o
 obj-y += softirq.o
+obj-y += spinlock.o
 obj-y += stop_machine.o
 obj-y += string.o
 obj-y += symbols.o
diff -r 824892134573 -r c003e5a23a4e xen/common/spinlock.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/common/spinlock.c     Mon Oct 20 16:48:17 2008 +0100
@@ -0,0 +1,147 @@
+#include <xen/config.h>
+#include <xen/smp.h>
+#include <xen/spinlock.h>
+
+void _spin_lock(spinlock_t *lock)
+{
+    _raw_spin_lock(&lock->raw);
+}
+
+void _spin_lock_irq(spinlock_t *lock)
+{
+    local_irq_disable();
+    _raw_spin_lock(&lock->raw);
+}
+
+unsigned long _spin_lock_irqsave(spinlock_t *lock)
+{
+    unsigned long flags;
+    local_irq_save(flags);
+    _raw_spin_lock(&lock->raw);
+    return flags;
+}
+
+void _spin_unlock(spinlock_t *lock)
+{
+    _raw_spin_unlock(&lock->raw);
+}
+
+void _spin_unlock_irq(spinlock_t *lock)
+{
+    _raw_spin_unlock(&lock->raw);
+    local_irq_enable();
+}
+
+void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+    _raw_spin_unlock(&lock->raw);
+    local_irq_restore(flags);
+}
+
+int _spin_is_locked(spinlock_t *lock)
+{
+    return _raw_spin_is_locked(&lock->raw);
+}
+
+int _spin_trylock(spinlock_t *lock)
+{
+    return _raw_spin_trylock(&lock->raw);
+}
+
+void _spin_barrier(spinlock_t *lock)
+{
+    do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
+    mb();
+}
+
+void _spin_lock_recursive(spinlock_t *lock)
+{
+    int cpu = smp_processor_id();
+    if ( likely(lock->recurse_cpu != cpu) )
+    {
+        spin_lock(lock);
+        lock->recurse_cpu = cpu;
+    }
+    lock->recurse_cnt++;
+}
+
+void _spin_unlock_recursive(spinlock_t *lock)
+{
+    if ( likely(--lock->recurse_cnt == 0) )
+    {
+        lock->recurse_cpu = -1;
+        spin_unlock(lock);
+    }
+}
+
+void _read_lock(rwlock_t *lock)
+{
+    _raw_read_lock(&lock->raw);
+}
+
+void _read_lock_irq(rwlock_t *lock)
+{
+    local_irq_disable();
+    _raw_read_lock(&lock->raw);
+}
+
+unsigned long _read_lock_irqsave(rwlock_t *lock)
+{
+    unsigned long flags;
+    local_irq_save(flags);
+    _raw_read_lock(&lock->raw);
+    return flags;
+}
+
+void _read_unlock(rwlock_t *lock)
+{
+    _raw_read_unlock(&lock->raw);
+}
+
+void _read_unlock_irq(rwlock_t *lock)
+{
+    _raw_read_unlock(&lock->raw);
+    local_irq_enable();
+}
+
+void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+    _raw_read_unlock(&lock->raw);
+    local_irq_restore(flags);
+}
+
+void _write_lock(rwlock_t *lock)
+{
+    _raw_write_lock(&lock->raw);
+}
+
+void _write_lock_irq(rwlock_t *lock)
+{
+    local_irq_disable();
+    _raw_write_lock(&lock->raw);
+}
+
+unsigned long _write_lock_irqsave(rwlock_t *lock)
+{
+    unsigned long flags;
+    local_irq_save(flags);
+    _raw_write_lock(&lock->raw);
+    return flags;
+}
+
+void _write_unlock(rwlock_t *lock)
+{
+    _raw_write_unlock(&lock->raw);
+}
+
+void _write_unlock_irq(rwlock_t *lock)
+{
+    _raw_write_unlock(&lock->raw);
+    local_irq_enable();
+}
+
+void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+    _raw_write_unlock(&lock->raw);
+    local_irq_restore(flags);
+}
diff -r 824892134573 -r c003e5a23a4e xen/include/asm-ia64/linux-xen/asm/spinlock.h
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h     Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h     Mon Oct 20 16:48:17 2008 +0100
@@ -27,25 +27,16 @@ typedef struct {
 #ifdef DEBUG_SPINLOCK
        void *locker;
 #endif
+} raw_spinlock_t;
+
 #ifdef XEN
-       unsigned char recurse_cpu;
-       unsigned char recurse_cnt;
-#endif
-} spinlock_t;
-
-#ifdef XEN
-#ifdef DEBUG_SPINLOCK
-#define SPIN_LOCK_UNLOCKED     /*(spinlock_t)*/ { 0, NULL, -1, 0 }
-#else
-#define SPIN_LOCK_UNLOCKED     /*(spinlock_t)*/ { 0, -1, 0 }
-#endif
-static inline void spin_lock_init(spinlock_t *lock)
-{
-       *lock = ((spinlock_t)SPIN_LOCK_UNLOCKED);
-}
-#else
-#define SPIN_LOCK_UNLOCKED                     /*(spinlock_t)*/ { 0 }
-#define spin_lock_init(x)                      ((x)->lock = 0)
+#ifdef DEBUG_SPINLOCK
+#define _RAW_SPIN_LOCK_UNLOCKED        /*(raw_spinlock_t)*/ { 0, NULL }
+#else
+#define _RAW_SPIN_LOCK_UNLOCKED        /*(raw_spinlock_t)*/ { 0 }
+#endif
+#else
+#define _RAW_SPIN_LOCK_UNLOCKED        /*(raw_spinlock_t)*/ { 0 }
 #endif
 
 #ifdef ASM_SUPPORTED
@@ -59,7 +50,7 @@ static inline void spin_lock_init(spinlo
 #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
 
 static inline void
-_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+_raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
 {
        register volatile unsigned int *ptr asm ("r31") = &lock->lock;
 
@@ -136,8 +127,8 @@ do {                                                                        \
 } while (0)
 #endif /* !ASM_SUPPORTED */
 
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define _raw_spin_unlock(x)    do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
+#define _raw_spin_is_locked(x) ((x)->lock != 0)
+#define _raw_spin_unlock(x)    do { barrier(); (x)->lock = 0; } while (0)
 #define _raw_spin_trylock(x)   (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
 #define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
 
@@ -147,16 +138,15 @@ typedef struct {
 #ifdef CONFIG_PREEMPT
        unsigned int break_lock;
 #endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0, 0 }
-
-#define rwlock_init(x)         do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
+} raw_rwlock_t;
+#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
+
 #define read_can_lock(rw)      (*(volatile int *)(rw) >= 0)
 #define write_can_lock(rw)     (*(volatile int *)(rw) == 0)
 
 #define _raw_read_lock(rw)                                                     \
 do {                                                                           \
-       rwlock_t *__read_lock_ptr = (rw);                                       \
+       raw_rwlock_t *__read_lock_ptr = (rw);                                   \
                                                                                \
        while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {  \
                ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                \
@@ -167,7 +157,7 @@ do {                                                                        \
 
 #define _raw_read_unlock(rw)                                   \
 do {                                                           \
-       rwlock_t *__read_lock_ptr = (rw);                       \
+       raw_rwlock_t *__read_lock_ptr = (rw);                   \
        ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
 } while (0)
 
@@ -230,7 +220,4 @@ do {                                                                \
        clear_bit(31, (x));                                                     \
 })
 
-#ifdef XEN
-#include <asm/xenspinlock.h>
-#endif
 #endif /*  _ASM_IA64_SPINLOCK_H */
diff -r 824892134573 -r c003e5a23a4e xen/include/asm-ia64/xenspinlock.h
--- a/xen/include/asm-ia64/xenspinlock.h        Mon Oct 20 15:31:54 2008 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-#ifndef _ASM_IA64_XENSPINLOCK_H
-#define _ASM_IA64_XENSPINLOCK_H
-
-/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
- * reentered recursively on the same CPU. All critical regions that may form
- * part of a recursively-nested set must be protected by these forms. If there
- * are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
- */
-#define _raw_spin_lock_recursive(_lock)            \
-    do {                                           \
-        int cpu = smp_processor_id();              \
-        if ( likely((_lock)->recurse_cpu != cpu) ) \
-        {                                          \
-            spin_lock(_lock);                      \
-            (_lock)->recurse_cpu = cpu;            \
-        }                                          \
-        (_lock)->recurse_cnt++;                    \
-    } while ( 0 )
-
-#define _raw_spin_unlock_recursive(_lock)          \
-    do {                                           \
-        if ( likely(--(_lock)->recurse_cnt == 0) ) \
-        {                                          \
-            (_lock)->recurse_cpu = -1;             \
-            spin_unlock(_lock);                    \
-        }                                          \
-    } while ( 0 )
-#endif /*  _ASM_IA64_XENSPINLOCK_H */
diff -r 824892134573 -r c003e5a23a4e xen/include/asm-x86/spinlock.h
--- a/xen/include/asm-x86/spinlock.h    Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/include/asm-x86/spinlock.h    Mon Oct 20 16:48:17 2008 +0100
@@ -8,18 +8,16 @@
 
 typedef struct {
     volatile s16 lock;
-    s8 recurse_cpu;
-    u8 recurse_cnt;
-} spinlock_t;
+} raw_spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 1, -1, 0 }
+#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 1 }
 
-#define spin_lock_init(x)      do { *(x) = (spinlock_t) SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(x)      (*(volatile char *)(&(x)->lock) <= 0)
+#define _raw_spin_is_locked(x)                  \
+    (*(volatile char *)(&(x)->lock) <= 0)
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void _raw_spin_lock(raw_spinlock_t *lock)
 {
-    __asm__ __volatile__ (
+    asm volatile (
         "1:  lock; decb %0         \n"
         "    js 2f                 \n"
         ".section .text.lock,\"ax\"\n"
@@ -31,81 +29,51 @@ static inline void _raw_spin_lock(spinlo
         : "=m" (lock->lock) : : "memory" );
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void _raw_spin_unlock(raw_spinlock_t *lock)
 {
-    ASSERT(spin_is_locked(lock));
-    __asm__ __volatile__ (
-       "movb $1,%0" 
+    ASSERT(_raw_spin_is_locked(lock));
+    asm volatile (
+        "movb $1,%0" 
         : "=m" (lock->lock) : : "memory" );
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int _raw_spin_trylock(raw_spinlock_t *lock)
 {
     char oldval;
-    __asm__ __volatile__(
+    asm volatile (
         "xchgb %b0,%1"
         :"=q" (oldval), "=m" (lock->lock)
-        :"0" (0) : "memory");
-    return oldval > 0;
+        :"0" (0) : "memory" );
+    return (oldval > 0);
 }
-
-/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
- * reentered recursively on the same CPU. All critical regions that may form
- * part of a recursively-nested set must be protected by these forms. If there
- * are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
- */
-#define _raw_spin_lock_recursive(_lock)            \
-    do {                                           \
-        int cpu = smp_processor_id();              \
-        if ( likely((_lock)->recurse_cpu != cpu) ) \
-        {                                          \
-            spin_lock(_lock);                      \
-            (_lock)->recurse_cpu = cpu;            \
-        }                                          \
-        (_lock)->recurse_cnt++;                    \
-    } while ( 0 )
-
-#define _raw_spin_unlock_recursive(_lock)          \
-    do {                                           \
-        if ( likely(--(_lock)->recurse_cnt == 0) ) \
-        {                                          \
-            (_lock)->recurse_cpu = -1;             \
-            spin_unlock(_lock);                    \
-        }                                          \
-    } while ( 0 )
-
 
 typedef struct {
     volatile unsigned int lock;
-} rwlock_t;
+} raw_rwlock_t;
 
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { RW_LOCK_BIAS }
-
-#define rwlock_init(x) do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)
+#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { RW_LOCK_BIAS }
 
 /*
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  */
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void _raw_read_lock(raw_rwlock_t *rw)
 {
     __build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void _raw_write_lock(raw_rwlock_t *rw)
 {
     __build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)                       \
-    __asm__ __volatile__ (                         \
-        "lock ; incl %0" :                         \
+#define _raw_read_unlock(rw)                    \
+    asm volatile (                              \
+        "lock ; incl %0" :                      \
         "=m" ((rw)->lock) : : "memory" )
-#define _raw_write_unlock(rw)                      \
-    __asm__ __volatile__ (                         \
-        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :   \
+#define _raw_write_unlock(rw)                           \
+    asm volatile (                                      \
+        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :        \
         "=m" ((rw)->lock) : : "memory" )
 
 #endif /* __ASM_SPINLOCK_H */
diff -r 824892134573 -r c003e5a23a4e xen/include/xen/spinlock.h
--- a/xen/include/xen/spinlock.h        Mon Oct 20 15:31:54 2008 +0100
+++ b/xen/include/xen/spinlock.h        Mon Oct 20 16:48:17 2008 +0100
@@ -3,93 +3,95 @@
 
 #include <xen/config.h>
 #include <asm/system.h>
-
-#define spin_lock_irqsave(lock, flags) \
-    do { local_irq_save(flags); spin_lock(lock); } while ( 0 )
-#define spin_lock_irq(lock) \
-    do { local_irq_disable(); spin_lock(lock); } while ( 0 )
-
-#define read_lock_irqsave(lock, flags) \
-    do { local_irq_save(flags); read_lock(lock); } while ( 0 )
-#define read_lock_irq(lock) \
-    do { local_irq_disable(); read_lock(lock); } while ( 0 )
-
-#define write_lock_irqsave(lock, flags) \
-    do { local_irq_save(flags); write_lock(lock); } while ( 0 )
-#define write_lock_irq(lock) \
-    do { local_irq_disable(); write_lock(lock); } while ( 0 )
-
-#define spin_unlock_irqrestore(lock, flags) \
-    do { spin_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define spin_unlock_irq(lock) \
-    do { spin_unlock(lock); local_irq_enable(); } while ( 0 )
-
-#define read_unlock_irqrestore(lock, flags) \
-    do { read_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define read_unlock_irq(lock) \
-    do { read_unlock(lock); local_irq_enable(); } while ( 0 )
-
-#define write_unlock_irqrestore(lock, flags) \
-    do { write_unlock(lock); local_irq_restore(flags); } while ( 0 )
-#define write_unlock_irq(lock) \
-    do { write_unlock(lock); local_irq_enable(); } while ( 0 )
-
-#ifdef CONFIG_SMP
-
 #include <asm/spinlock.h>
 
-#else
+typedef struct {
+    raw_spinlock_t raw;
+    s8 recurse_cpu;
+    u8 recurse_cnt;
+} spinlock_t;
 
-#if (__GNUC__ > 2)
-typedef struct { } spinlock_t;
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { }
-#else
-typedef struct { int gcc_is_buggy; } spinlock_t;
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 0 }
-#endif
+#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, -1, 0 }
+#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
+#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
 
-#define spin_lock_init(lock)             do { } while(0)
-#define spin_is_locked(lock)             (0)
-#define _raw_spin_lock(lock)             (void)(lock)
-#define _raw_spin_trylock(lock)          ({1; })
-#define _raw_spin_unlock(lock)           do { } while(0)
-#define _raw_spin_lock_recursive(lock)   do { } while(0)
-#define _raw_spin_unlock_recursive(lock) do { } while(0)
+typedef struct {
+    raw_rwlock_t raw;
+} rwlock_t;
 
-#if (__GNUC__ > 2)
-typedef struct { } rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { }
-#else
-typedef struct { int gcc_is_buggy; } rwlock_t;
-#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { 0 }
-#endif
+#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED }
+#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
+#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
 
-#define rwlock_init(lock)            do { } while(0)
-#define _raw_read_lock(lock)         (void)(lock) /* Not "unused variable". */
-#define _raw_read_unlock(lock)       do { } while(0)
-#define _raw_write_lock(lock)        (void)(lock) /* Not "unused variable". */
-#define _raw_write_unlock(lock)      do { } while(0)
+void _spin_lock(spinlock_t *lock);
+void _spin_lock_irq(spinlock_t *lock);
+unsigned long _spin_lock_irqsave(spinlock_t *lock);
 
-#endif
+void _spin_unlock(spinlock_t *lock);
+void _spin_unlock_irq(spinlock_t *lock);
+void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
 
-#define spin_lock(_lock)             _raw_spin_lock(_lock)
-#define spin_trylock(_lock)          _raw_spin_trylock(_lock)
-#define spin_unlock(_lock)           _raw_spin_unlock(_lock)
-#define spin_lock_recursive(_lock)   _raw_spin_lock_recursive(_lock)
-#define spin_unlock_recursive(_lock) _raw_spin_unlock_recursive(_lock)
-#define read_lock(_lock)             _raw_read_lock(_lock)
-#define read_unlock(_lock)           _raw_read_unlock(_lock)
-#define write_lock(_lock)            _raw_write_lock(_lock)
-#define write_unlock(_lock)          _raw_write_unlock(_lock)
+int _spin_is_locked(spinlock_t *lock);
+int _spin_trylock(spinlock_t *lock);
+void _spin_barrier(spinlock_t *lock);
+
+void _spin_lock_recursive(spinlock_t *lock);
+void _spin_unlock_recursive(spinlock_t *lock);
+
+void _read_lock(rwlock_t *lock);
+void _read_lock_irq(rwlock_t *lock);
+unsigned long _read_lock_irqsave(rwlock_t *lock);
+
+void _read_unlock(rwlock_t *lock);
+void _read_unlock_irq(rwlock_t *lock);
+void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
+
+void _write_lock(rwlock_t *lock);
+void _write_lock_irq(rwlock_t *lock);
+unsigned long _write_lock_irqsave(rwlock_t *lock);
+
+void _write_unlock(rwlock_t *lock);
+void _write_unlock_irq(rwlock_t *lock);
+void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
+
+#define spin_lock(l)                  _spin_lock(l)
+#define spin_lock_irq(l)              _spin_lock_irq(l)
+#define spin_lock_irqsave(l, f)       ((f) = _spin_lock_irqsave(l))
+
+#define spin_unlock(l)                _spin_unlock(l)
+#define spin_unlock_irq(l)            _spin_unlock_irq(l)
+#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)
+
+#define spin_is_locked(l)             _raw_spin_is_locked(&(l)->raw)
+#define spin_trylock(l)               _spin_trylock(l)
 
 /* Ensure a lock is quiescent between two critical operations. */
-static inline void spin_barrier(spinlock_t *lock)
-{
-    do { mb(); } while ( spin_is_locked(lock) );
-    mb();
-}
+#define spin_barrier(l)               _spin_barrier(l)
 
-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+/*
+ * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
+ * reentered recursively on the same CPU. All critical regions that may form
+ * part of a recursively-nested set must be protected by these forms. If there
+ * are any critical regions that cannot form part of such a set, they can use
+ * standard spin_[un]lock().
+ */
+#define spin_lock_recursive(l)        _spin_lock_recursive(l)
+#define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
+
+#define read_lock(l)                  _read_lock(l)
+#define read_lock_irq(l)              _read_lock_irq(l)
+#define read_lock_irqsave(l, f)       ((f) = _read_lock_irqsave(l))
+
+#define read_unlock(l)                _read_unlock(l)
+#define read_unlock_irq(l)            _read_unlock_irq(l)
+#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
+
+#define write_lock(l)                 _write_lock(l)
+#define write_lock_irq(l)             _write_lock_irq(l)
+#define write_lock_irqsave(l, f)      ((f) = _write_lock_irqsave(l))
+
+#define write_unlock(l)               _write_unlock(l)
+#define write_unlock_irq(l)           _write_unlock_irq(l)
+#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
 
 #endif /* __SPINLOCK_H__ */

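For reference, caller-side usage is unchanged by the outlining; the one
subtlety is that the irqsave forms now assign flags from the function's
return value rather than having the macro call local_irq_save(flags)
directly. A minimal usage sketch (my_lock and example() are
illustrative, not part of the patch):

    static DEFINE_SPINLOCK(my_lock);    /* illustrative lock */

    void example(void)
    {
        unsigned long flags;

        /* Expands to: flags = _spin_lock_irqsave(&my_lock); */
        spin_lock_irqsave(&my_lock, flags);
        /* ... critical section with IRQs disabled ... */
        spin_unlock_irqrestore(&my_lock, flags);

        /* The recursive forms may safely be re-entered on the same CPU;
         * the lock is really released only when recurse_cnt hits zero. */
        spin_lock_recursive(&my_lock);
        spin_lock_recursive(&my_lock);   /* same CPU: just bumps recurse_cnt */
        spin_unlock_recursive(&my_lock);
        spin_unlock_recursive(&my_lock); /* count reaches 0: actual unlock */
    }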