
[Xen-devel] [Patch] xen/spinlock: Improvements to check_lock()



Allow check_lock() to distinguish between an uninitialised spinlock and one
which has mixed its IRQ-safety states.  All of the new code is on the existing
unlikely path, with the latching operation performed first and taking an early
exit.

Make use of an enumeration rather than magic numbers.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>

---

This was discovered in combination with an HPET issue (patch arriving
separately), where an uninitialised spinlock was mistaken for an IRQ-unsafe
one, resulting in a spurious failure.
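
For illustration only (not part of the patch), a minimal sketch of that
failure mode, using a hypothetical structure and function name: xzalloc()
zero-fills the allocation, so if spin_lock_init() is forgotten the old
debug->irq_safe field is left at 0, which is indistinguishable from a lock
legitimately latched as not IRQ-safe.

    /* Hypothetical example -- not part of the patch. */
    #include <xen/spinlock.h>
    #include <xen/xmalloc.h>

    struct foo {
        spinlock_t lock;
    };

    static void example(void)
    {
        unsigned long flags;
        struct foo *f = xzalloc(struct foo); /* zero-filled allocation */

        if ( !f )
            return;

        /* spin_lock_init(&f->lock) accidentally omitted here. */

        /*
         * Old scheme: irq_safe == 0 reads as "not IRQ-safe", so taking the
         * lock with IRQs disabled trips the same BUG_ON() used for genuinely
         * mixed safety states.  New scheme: state == 0 matches no valid enum
         * value, so the dedicated uninitialised-spinlock BUG_ON() fires
         * instead.
         */
        spin_lock_irqsave(&f->lock, flags);
        spin_unlock_irqrestore(&f->lock, flags);
    }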

This is probably for 4.5, but is presented here now for review.
---
 xen/common/spinlock.c      |   22 +++++++++++++++++-----
 xen/include/xen/spinlock.h |    9 +++++++--
 2 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 575cc6d..7f6a3db 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -16,7 +16,8 @@ static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
 
 static void check_lock(struct lock_debug *debug)
 {
-    int irq_safe = !local_irq_is_enabled();
+    enum lock_state current_state =
+        local_irq_is_enabled() ? lock_state_irqunsafe : lock_state_irqsafe;
 
     if ( unlikely(atomic_read(&spin_debug) <= 0) )
         return;
@@ -42,10 +43,21 @@ static void check_lock(struct lock_debug *debug)
      * To guard against this subtle bug we latch the IRQ safety of every
      * spinlock in the system, on first use.
      */
-    if ( unlikely(debug->irq_safe != irq_safe) )
+    if ( unlikely(debug->state != current_state) )
     {
-        int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
-        BUG_ON(seen == !irq_safe);
+        enum lock_state old_state =
+            cmpxchg(&debug->state, lock_state_unknown, current_state);
+
+        /* Latched first safety state */
+        if ( old_state == lock_state_unknown )
+            return;
+
+        /* Uninitialised spinlock */
+        BUG_ON( !(old_state == lock_state_irqsafe ||
+                  old_state == lock_state_irqunsafe) );
+
+        /* Mixed safety states */
+        BUG_ON( current_state != old_state );
     }
 }
 
@@ -65,7 +77,7 @@ static void check_barrier(struct lock_debug *debug)
      * However, if we spin on an IRQ-unsafe lock with IRQs disabled then that
      * is clearly wrong, for the same reason outlined in check_lock() above.
      */
-    BUG_ON(!local_irq_is_enabled() && (debug->irq_safe == 0));
+    BUG_ON(!local_irq_is_enabled() && (debug->state == lock_state_irqunsafe));
 }
 
 void spin_debug_enable(void)
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 12b0a89..2040773 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -6,9 +6,14 @@
 
 #ifndef NDEBUG
 struct lock_debug {
-    int irq_safe; /* +1: IRQ-safe; 0: not IRQ-safe; -1: don't know yet */
+    enum lock_state {
+        /* leave 0 unused to help detect uninitialised spinlocks */
+        lock_state_unknown = 1,
+        lock_state_irqsafe = 2,
+        lock_state_irqunsafe = 3
+    } state;
 };
-#define _LOCK_DEBUG { -1 }
+#define _LOCK_DEBUG { lock_state_unknown }
 void spin_debug_enable(void);
 void spin_debug_disable(void);
 #else
-- 
1.7.10.4

