
[Xen-changelog] [xen-unstable] spinlock: Add debug-build checks for IRQ-safe spinlocks.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1224759232 -3600
# Node ID 0358305c68830f8af398192fb0a0b0f1a2517f73
# Parent  4941c5a1459839c9923f7dafe6fe7705f90ca436
spinlock: Add debug-build checks for IRQ-safe spinlocks.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/setup.c       |    2 +
 xen/common/spinlock.c      |   52 +++++++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/spinlock.h |   21 ++++++++++++++++--
 3 files changed, 73 insertions(+), 2 deletions(-)
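
In debug (!NDEBUG) builds each spinlock and rwlock now carries a small
lock_debug field whose irq_safe member starts out at -1 ("don't know yet").
Once __start_xen() has called spin_debug_enable(), the first acquisition of a
given lock latches whether IRQs were disabled at that point; any later
acquisition of the same lock with the opposite IRQ state trips the BUG_ON()
in check_lock().  As a rough illustration (hypothetical code, not part of
this changeset), the kind of inconsistency the check catches is:

    static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock */

    static void irq_context_user(void)
    {
        /* Runs with IRQs disabled; if this runs first, check_lock()
         * latches irq_safe = 1 for example_lock. */
        spin_lock(&example_lock);
        /* ... */
        spin_unlock(&example_lock);
    }

    static void normal_context_user(void)
    {
        /* IRQs still enabled here, so irq_safe evaluates to 0: the
         * mismatch against the latched value triggers BUG(). */
        spin_lock(&example_lock);
        /* ... */
        spin_unlock(&example_lock);
    }

The mismatch is detected on the second acquisition, so the check fires
whichever path happens to run first.  Restoring consistency means taking the
lock with spin_lock_irqsave()/spin_unlock_irqrestore() (or spin_lock_irq())
on the non-IRQ path.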

diff -r 4941c5a14598 -r 0358305c6883 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Thu Oct 23 11:40:59 2008 +0100
+++ b/xen/arch/x86/setup.c      Thu Oct 23 11:53:52 2008 +0100
@@ -1059,6 +1059,8 @@ void __init __start_xen(unsigned long mb
                         cmdline) != 0)
         panic("Could not set up DOM0 guest OS\n");
 
+    spin_debug_enable();
+
     /* Scrub RAM that is still free and so may go to an unprivileged domain. */
     scrub_heap_pages();
 
diff -r 4941c5a14598 -r 0358305c6883 xen/common/spinlock.c
--- a/xen/common/spinlock.c     Thu Oct 23 11:40:59 2008 +0100
+++ b/xen/common/spinlock.c     Thu Oct 23 11:53:52 2008 +0100
@@ -1,9 +1,48 @@
 #include <xen/config.h>
+#include <xen/irq.h>
 #include <xen/smp.h>
 #include <xen/spinlock.h>
 
+#ifndef NDEBUG
+
+static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
+
+static void check_lock(struct lock_debug *debug)
+{
+    int irq_safe = !local_irq_is_enabled();
+
+    if ( unlikely(atomic_read(&spin_debug) <= 0) )
+        return;
+
+    /* A few places take liberties with this. */
+    /* BUG_ON(in_irq() && !irq_safe); */
+
+    if ( unlikely(debug->irq_safe != irq_safe) )
+    {
+        int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
+        BUG_ON(seen == !irq_safe);
+    }
+}
+
+void spin_debug_enable(void)
+{
+    atomic_inc(&spin_debug);
+}
+
+void spin_debug_disable(void)
+{
+    atomic_dec(&spin_debug);
+}
+
+#else /* defined(NDEBUG) */
+
+#define check_lock(l) ((void)0)
+
+#endif
+
 void _spin_lock(spinlock_t *lock)
 {
+    check_lock(&lock->debug);
     _raw_spin_lock(&lock->raw);
 }
 
@@ -11,6 +50,7 @@ void _spin_lock_irq(spinlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
+    check_lock(&lock->debug);
     _raw_spin_lock(&lock->raw);
 }
 
@@ -18,6 +58,7 @@ unsigned long _spin_lock_irqsave(spinloc
 {
     unsigned long flags;
     local_irq_save(flags);
+    check_lock(&lock->debug);
     _raw_spin_lock(&lock->raw);
     return flags;
 }
@@ -41,16 +82,19 @@ void _spin_unlock_irqrestore(spinlock_t 
 
 int _spin_is_locked(spinlock_t *lock)
 {
+    check_lock(&lock->debug);
     return _raw_spin_is_locked(&lock->raw);
 }
 
 int _spin_trylock(spinlock_t *lock)
 {
+    check_lock(&lock->debug);
     return _raw_spin_trylock(&lock->raw);
 }
 
 void _spin_barrier(spinlock_t *lock)
 {
+    check_lock(&lock->debug);
     do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
     mb();
 }
@@ -69,6 +113,8 @@ void _spin_lock_recursive(spinlock_t *lo
 
     /* Don't allow overflow of recurse_cpu field. */
     BUILD_BUG_ON(NR_CPUS > 0xfffu);
+
+    check_lock(&lock->debug);
 
     if ( likely(lock->recurse_cpu != cpu) )
     {
@@ -92,6 +138,7 @@ void _spin_unlock_recursive(spinlock_t *
 
 void _read_lock(rwlock_t *lock)
 {
+    check_lock(&lock->debug);
     _raw_read_lock(&lock->raw);
 }
 
@@ -99,6 +146,7 @@ void _read_lock_irq(rwlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
+    check_lock(&lock->debug);
     _raw_read_lock(&lock->raw);
 }
 
@@ -106,6 +154,7 @@ unsigned long _read_lock_irqsave(rwlock_
 {
     unsigned long flags;
     local_irq_save(flags);
+    check_lock(&lock->debug);
     _raw_read_lock(&lock->raw);
     return flags;
 }
@@ -129,6 +178,7 @@ void _read_unlock_irqrestore(rwlock_t *l
 
 void _write_lock(rwlock_t *lock)
 {
+    check_lock(&lock->debug);
     _raw_write_lock(&lock->raw);
 }
 
@@ -136,6 +186,7 @@ void _write_lock_irq(rwlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
+    check_lock(&lock->debug);
     _raw_write_lock(&lock->raw);
 }
 
@@ -143,6 +194,7 @@ unsigned long _write_lock_irqsave(rwlock
 {
     unsigned long flags;
     local_irq_save(flags);
+    check_lock(&lock->debug);
     _raw_write_lock(&lock->raw);
     return flags;
 }
diff -r 4941c5a14598 -r 0358305c6883 xen/include/xen/spinlock.h
--- a/xen/include/xen/spinlock.h        Thu Oct 23 11:40:59 2008 +0100
+++ b/xen/include/xen/spinlock.h        Thu Oct 23 11:53:52 2008 +0100
@@ -5,21 +5,38 @@
 #include <asm/system.h>
 #include <asm/spinlock.h>
 
+#ifndef NDEBUG
+struct lock_debug {
+    int irq_safe; /* +1: IRQ-safe; 0: not IRQ-safe; -1: don't know yet */
+};
+#define _LOCK_DEBUG { -1 }
+void spin_debug_enable(void);
+void spin_debug_disable(void);
+#else
+struct lock_debug { };
+#define _LOCK_DEBUG { }
+#define spin_debug_enable() ((void)0)
+#define spin_debug_disable() ((void)0)
+#endif
+
 typedef struct {
     raw_spinlock_t raw;
     u16 recurse_cpu:12;
     u16 recurse_cnt:4;
+    struct lock_debug debug;
 } spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0 }
+
+#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0, _LOCK_DEBUG }
 #define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
 #define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
 
 typedef struct {
     raw_rwlock_t raw;
+    struct lock_debug debug;
 } rwlock_t;
 
-#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED }
+#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED, _LOCK_DEBUG }
 #define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
 #define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
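
spin_debug_enable()/spin_debug_disable() maintain an atomic counter, and
check_lock() returns early unless the counter is positive, so the checks can
be suspended (and the calls nested) around code that deliberately takes a
lock under either IRQ state.  A minimal sketch of that pattern, assuming such
a caller exists ('some_lock' is hypothetical, not from this patch):

    spin_debug_disable();        /* counter drops; check_lock() is a no-op */
    spin_lock(&some_lock);       /* IRQ state here may differ from normal users */
    /* ... exceptional path ... */
    spin_unlock(&some_lock);
    spin_debug_enable();         /* re-arm the checks */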
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog