
[Xen-devel] [PATCH v2] xen/spinlock: Use mnemonics for recursive spinlock sentinel values



Introduce SPINLOCK_NO_CPU and SPINLOCK_MAX_RECURSE mnemonics for the
open-coded 0xfffu and 0xfu sentinel values used by the recursive spinlock
code, and use them in place of the raw constants.

No functional change.  The compiled binary is identical.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Ian Campbell <Ian.Campbell@xxxxxxxxxx>

v2: Remove the fencepost adjustment.  All valid cpu ids are strictly less
    than NR_CPUS, so BUILD_BUG_ON(NR_CPUS > SPINLOCK_NO_CPU) already
    guarantees that no real cpu id can alias the sentinel.
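
    Purely as an illustrative aside (not part of the patch): the two
    sentinels are simply the all-ones values of the 12-bit and 4-bit
    bitfields.  The standalone sketch below demonstrates that; demo_lock
    and the NR_CPUS value of 256 are made-up stand-ins, not Xen's real
    spinlock_t or configuration.

    #include <assert.h>
    #include <stdio.h>

    #define SPINLOCK_NO_CPU      0xfffu  /* all-ones value of the 12-bit recurse_cpu field */
    #define SPINLOCK_MAX_RECURSE 0xfu    /* all-ones value of the 4-bit recurse_cnt field */

    /* Stand-in for the relevant spinlock_t bitfields; not Xen's real type. */
    struct demo_lock {
        unsigned int recurse_cpu:12;
        unsigned int recurse_cnt:4;
    };

    int main(void)
    {
        struct demo_lock l = { .recurse_cpu = SPINLOCK_NO_CPU, .recurse_cnt = 0 };
        unsigned int nr_cpus = 256;  /* hypothetical NR_CPUS */

        /* The sentinel is exactly representable in the 12-bit field. */
        assert(l.recurse_cpu == SPINLOCK_NO_CPU);

        /* Valid cpu ids are 0 .. nr_cpus - 1, so none can equal the sentinel
         * as long as nr_cpus <= SPINLOCK_NO_CPU (the BUILD_BUG_ON condition). */
        assert(nr_cpus <= SPINLOCK_NO_CPU);

        /* The ASSERT() in the patch requires recurse_cnt < SPINLOCK_MAX_RECURSE
         * before each increment, so the counter tops out at 0xf and the 4-bit
         * field never wraps back to zero. */
        l.recurse_cnt = SPINLOCK_MAX_RECURSE;
        printf("cpu sentinel %#x, recursion ceiling %u\n",
               (unsigned int)l.recurse_cpu, (unsigned int)l.recurse_cnt);

        return 0;
    }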
---
 xen/common/spinlock.c      | 8 ++++----
 xen/include/xen/spinlock.h | 6 ++++--
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 7f89694..7b0cf6c 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -246,7 +246,7 @@ int _spin_trylock_recursive(spinlock_t *lock)
     unsigned int cpu = smp_processor_id();
 
     /* Don't allow overflow of recurse_cpu field. */
-    BUILD_BUG_ON(NR_CPUS > 0xfffu);
+    BUILD_BUG_ON(NR_CPUS > SPINLOCK_NO_CPU);
 
     check_lock(&lock->debug);
 
@@ -258,7 +258,7 @@ int _spin_trylock_recursive(spinlock_t *lock)
     }
 
     /* We support only fairly shallow recursion, else the counter overflows. */
-    ASSERT(lock->recurse_cnt < 0xfu);
+    ASSERT(lock->recurse_cnt < SPINLOCK_MAX_RECURSE);
     lock->recurse_cnt++;
 
     return 1;
@@ -275,7 +275,7 @@ void _spin_lock_recursive(spinlock_t *lock)
     }
 
     /* We support only fairly shallow recursion, else the counter overflows. */
-    ASSERT(lock->recurse_cnt < 0xfu);
+    ASSERT(lock->recurse_cnt < SPINLOCK_MAX_RECURSE);
     lock->recurse_cnt++;
 }
 
@@ -283,7 +283,7 @@ void _spin_unlock_recursive(spinlock_t *lock)
 {
     if ( likely(--lock->recurse_cnt == 0) )
     {
-        lock->recurse_cpu = 0xfffu;
+        lock->recurse_cpu = SPINLOCK_NO_CPU;
         spin_unlock(lock);
     }
 }
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index fb0438e..9555c53 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -80,7 +80,7 @@ struct lock_profile_qhead {
     static struct lock_profile *__lock_profile_##name                         \
     __used_section(".lockprofile.data") =                                     \
     &__lock_profile_data_##name
-#define _SPIN_LOCK_UNLOCKED(x) { { 0 }, 0xfffu, 0, _LOCK_DEBUG, x }
+#define _SPIN_LOCK_UNLOCKED(x) { { 0 }, SPINLOCK_NO_CPU, 0, _LOCK_DEBUG, x }
 #define SPIN_LOCK_UNLOCKED _SPIN_LOCK_UNLOCKED(NULL)
 #define DEFINE_SPINLOCK(l)                                                    \
     spinlock_t l = _SPIN_LOCK_UNLOCKED(NULL);                                 \
@@ -116,7 +116,7 @@ extern void spinlock_profile_reset(unsigned char key);
 
 struct lock_profile_qhead { };
 
-#define SPIN_LOCK_UNLOCKED { { 0 }, 0xfffu, 0, _LOCK_DEBUG }
+#define SPIN_LOCK_UNLOCKED { { 0 }, SPINLOCK_NO_CPU, 0, _LOCK_DEBUG }
 #define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
 
 #define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
@@ -138,7 +138,9 @@ typedef union {
 typedef struct spinlock {
     spinlock_tickets_t tickets;
     u16 recurse_cpu:12;
+#define SPINLOCK_NO_CPU 0xfffu
     u16 recurse_cnt:4;
+#define SPINLOCK_MAX_RECURSE 0xfu
     struct lock_debug debug;
 #ifdef LOCK_PROFILE
     struct lock_profile *profile;
-- 
2.1.4

