[PATCH v3 11/13] xen/spinlock: split recursive spinlocks from normal ones
Recursive and normal spinlocks share the same data structure for
representing the lock.

This has two major disadvantages:

- it is not clear from the definition of a lock whether it is intended
  to be used recursively or not, while a mixture of both usage variants
  needs to be avoided

- in production builds (builds without CONFIG_DEBUG_LOCKS) an ordinary
  spinlock needs 8 bytes of data instead of 4, due to the additional
  recursion data (as a consequence, the rwlock data uses 12 bytes
  instead of only 8)

Fix that by introducing a dedicated struct rspinlock for recursive
spinlocks only, and switch the recursive spinlock functions to require
pointers to this new struct. This allows the correct usage to be
checked at build time.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V2:
- use shorter names (Jan Beulich)
- don't embed spinlock_t in rspinlock_t (Jan Beulich)
---
 xen/common/spinlock.c      | 49 ++++++++++++++++++++++++++++++++
 xen/include/xen/spinlock.h | 58 +++++++++++++++++++++++++-------------
 2 files changed, 88 insertions(+), 19 deletions(-)

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 8bb6691b14..0467541f43 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -541,6 +541,55 @@ void rspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags)
     local_irq_restore(flags);
 }
 
+int nrspin_trylock(rspinlock_t *lock)
+{
+    check_lock(&lock->debug, true);
+
+    if ( unlikely(lock->recurse_cpu != SPINLOCK_NO_CPU) )
+        return 0;
+
+    return spin_trylock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
+void nrspin_lock(rspinlock_t *lock)
+{
+    spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, NULL,
+                     NULL);
+}
+
+void nrspin_unlock(rspinlock_t *lock)
+{
+    spin_unlock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
+void nrspin_lock_irq(rspinlock_t *lock)
+{
+    ASSERT(local_irq_is_enabled());
+    local_irq_disable();
+    nrspin_lock(lock);
+}
+
+void nrspin_unlock_irq(rspinlock_t *lock)
+{
+    nrspin_unlock(lock);
+    local_irq_enable();
+}
+
+unsigned long __nrspin_lock_irqsave(rspinlock_t *lock)
+{
+    unsigned long flags;
+
+    local_irq_save(flags);
+    nrspin_lock(lock);
+    return flags;
+}
+
+void nrspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags)
+{
+    nrspin_unlock(lock);
+    local_irq_restore(flags);
+}
+
 #ifdef CONFIG_DEBUG_LOCK_PROFILE
 
 struct lock_profile_anc {
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index fb6ca1949a..a2b1bb2df6 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -76,8 +76,6 @@ union lock_debug { };
  */
 struct spinlock;
 
-/* Temporary hack until a dedicated struct rspinlock is existing. */
-#define rspinlock spinlock
 
 struct lock_profile {
     struct lock_profile *next;       /* forward link */
@@ -108,6 +106,10 @@ struct lock_profile_qhead {
         __used_section(".lockprofile.data") =                                \
         &__lock_profile_data_##name
 #define _SPIN_LOCK_UNLOCKED(x) {                                             \
+    .debug =_LOCK_DEBUG,                                                     \
+    .profile = x,                                                            \
+}
+#define _RSPIN_LOCK_UNLOCKED(x) {                                            \
     .recurse_cpu = SPINLOCK_NO_CPU,                                          \
     .debug =_LOCK_DEBUG,                                                     \
     .profile = x,                                                            \
@@ -117,8 +119,9 @@ struct lock_profile_qhead {
     spinlock_t l = _SPIN_LOCK_UNLOCKED(NULL);                                \
     static struct lock_profile __lock_profile_data_##l = _LOCK_PROFILE(l);   \
     _LOCK_PROFILE_PTR(l)
+#define RSPIN_LOCK_UNLOCKED _RSPIN_LOCK_UNLOCKED(NULL)
 #define DEFINE_RSPINLOCK(l)                                                  \
-    rspinlock_t l = _SPIN_LOCK_UNLOCKED(NULL);                               \
+    rspinlock_t l = _RSPIN_LOCK_UNLOCKED(NULL);                              \
     static struct lock_profile __lock_profile_data_##l = _RLOCK_PROFILE(l);  \
     _LOCK_PROFILE_PTR(l)
 
@@ -143,8 +146,11 @@ struct lock_profile_qhead {
 
 #define spin_lock_init_prof(s, l)                                            \
     __spin_lock_init_prof(s, l, lock, spinlock_t, 0)
-#define rspin_lock_init_prof(s, l)                                           \
-    __spin_lock_init_prof(s, l, rlock, rspinlock_t, 1)
+#define rspin_lock_init_prof(s, l) do {                                      \
+        __spin_lock_init_prof(s, l, rlock, rspinlock_t, 1);                  \
+        (s)->l.recurse_cpu = SPINLOCK_NO_CPU;                                \
+        (s)->l.recurse_cnt = 0;                                              \
+    } while (0)
 
 void _lock_profile_register_struct(
     int32_t type, struct lock_profile_qhead *qhead, int32_t idx);
@@ -166,11 +172,15 @@ struct lock_profile_qhead { };
 struct lock_profile { };
 
 #define SPIN_LOCK_UNLOCKED {                                                 \
+    .debug =_LOCK_DEBUG,                                                     \
+}
+#define RSPIN_LOCK_UNLOCKED {                                                \
+    .debug =_LOCK_DEBUG,                                                     \
     .recurse_cpu = SPINLOCK_NO_CPU,                                          \
     .debug =_LOCK_DEBUG,                                                     \
 }
 #define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
-#define DEFINE_RSPINLOCK(l) rspinlock_t l = SPIN_LOCK_UNLOCKED
+#define DEFINE_RSPINLOCK(l) rspinlock_t l = RSPIN_LOCK_UNLOCKED
 
 #define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
 #define rspin_lock_init_prof(s, l) rspin_lock_init(&((s)->l))
@@ -180,7 +190,6 @@ struct lock_profile { };
 
 #endif
 
-
 typedef union {
     uint32_t head_tail;
     struct {
@@ -192,6 +201,14 @@ typedef union {
 #define SPINLOCK_TICKET_INC { .head_tail = 0x10000, }
 
 typedef struct spinlock {
+    spinlock_tickets_t tickets;
+    union lock_debug debug;
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
+    struct lock_profile *profile;
+#endif
+} spinlock_t;
+
+typedef struct rspinlock {
     spinlock_tickets_t tickets;
     uint16_t recurse_cpu:SPINLOCK_CPU_BITS;
 #define SPINLOCK_NO_CPU ((1u << SPINLOCK_CPU_BITS) - 1)
@@ -202,12 +219,10 @@ typedef struct spinlock {
 #ifdef CONFIG_DEBUG_LOCK_PROFILE
     struct lock_profile *profile;
 #endif
-} spinlock_t;
-
-typedef spinlock_t rspinlock_t;
+} rspinlock_t;
 
 #define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
-#define rspin_lock_init(l) (*(l) = (rspinlock_t)SPIN_LOCK_UNLOCKED)
+#define rspin_lock_init(l) (*(l) = (rspinlock_t)RSPIN_LOCK_UNLOCKED)
 
 void _spin_lock(spinlock_t *lock);
 void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data);
@@ -242,6 +257,19 @@ void rspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);
 int rspin_is_locked(const rspinlock_t *lock);
 void rspin_barrier(rspinlock_t *lock);
 
+int nrspin_trylock(rspinlock_t *lock);
+void nrspin_lock(rspinlock_t *lock);
+void nrspin_unlock(rspinlock_t *lock);
+void nrspin_lock_irq(rspinlock_t *lock);
+void nrspin_unlock_irq(rspinlock_t *lock);
+#define nrspin_lock_irqsave(l, f)                               \
+    ({                                                          \
+        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
+        ((f) = __nrspin_lock_irqsave(l));                       \
+    })
+unsigned long __nrspin_lock_irqsave(rspinlock_t *lock);
+void nrspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);
+
 #define spin_lock(l)                  _spin_lock(l)
 #define spin_lock_cb(l, c, d)         _spin_lock_cb(l, c, d)
 #define spin_lock_irq(l)              _spin_lock_irq(l)
@@ -270,12 +298,4 @@ void rspin_barrier(rspinlock_t *lock);
 /* Ensure a lock is quiescent between two critical operations. */
 #define spin_barrier(l)               _spin_barrier(l)
 
-#define nrspin_trylock(l)             spin_trylock(l)
-#define nrspin_lock(l)                spin_lock(l)
-#define nrspin_unlock(l)              spin_unlock(l)
-#define nrspin_lock_irq(l)            spin_lock_irq(l)
-#define nrspin_unlock_irq(l)          spin_unlock_irq(l)
-#define nrspin_lock_irqsave(l, f)     spin_lock_irqsave(l, f)
-#define nrspin_unlock_irqrestore(l, f) spin_unlock_irqrestore(l, f)
-
 #endif /* __SPINLOCK_H__ */
-- 
2.35.3
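As an illustration of the usage split this patch establishes (not part
of the patch itself): a lock that is ever taken recursively is declared
as rspinlock_t, and each caller states explicitly whether it takes it
recursively (rspin_*) or non-recursively (nrspin_*). The lock names and
the two functions below are invented; the locking calls are the API of
this series, assuming the rspin_lock()/rspin_unlock() pair introduced
by the earlier patches.

/* Illustrative sketch only; example_lock, plain_lock and both
 * functions are made up. */
static DEFINE_RSPINLOCK(example_lock);  /* may nest on the same CPU */
static DEFINE_SPINLOCK(plain_lock);     /* plain 4-byte lock */

static void recursive_path(void)
{
    rspin_lock(&example_lock);
    rspin_lock(&example_lock);          /* recursion is fine here */
    rspin_unlock(&example_lock);
    rspin_unlock(&example_lock);
}

static void non_recursive_path(void)
{
    unsigned long flags;

    /* Non-recursive ("nr") take of a recursion-capable lock. */
    nrspin_lock_irqsave(&example_lock, flags);
    nrspin_unlock_irqrestore(&example_lock, flags);

    spin_lock(&plain_lock);
    /* rspin_lock(&plain_lock) would no longer compile, as spinlock_t *
     * is not convertible to rspinlock_t * - this is the build time
     * check mentioned in the commit message. */
    spin_unlock(&plain_lock);
}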
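The size claim in the commit message can be checked with a standalone
sketch. The two structs below only mimic the layout of the patched
types in a production build (empty union lock_debug, no lock
profiling); they are not the Xen definitions, and the bit-field widths
are assumptions.

#include <stdint.h>
#include <stdio.h>

#define SPINLOCK_CPU_BITS 12   /* assumed width, mirroring the header */

typedef union {
    uint32_t head_tail;
    struct {
        uint16_t head;
        uint16_t tail;
    };
} tickets_t;

/* Mimics the new spinlock_t: ticket data only. */
typedef struct {
    tickets_t tickets;
} plain_lock_t;

/* Mimics rspinlock_t: ticket data plus recursion bookkeeping. */
typedef struct {
    tickets_t tickets;
    uint16_t recurse_cpu:SPINLOCK_CPU_BITS;
    uint16_t recurse_cnt:(16 - SPINLOCK_CPU_BITS);
} recursive_lock_t;

int main(void)
{
    /* Prints 4 and 8 on common ABIs: the 4 extra bytes are what the
     * split saves for every non-recursive lock. */
    printf("plain: %zu bytes, recursive: %zu bytes\n",
           sizeof(plain_lock_t), sizeof(recursive_lock_t));
    return 0;
}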
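nrspin_lock_irqsave() in the patch is a macro rather than a function so
that it can assign to the caller's flags variable and reject, at build
time, a variable that is not exactly unsigned long wide. The pattern
relies on a GCC/Clang statement expression; below is a self-contained
sketch of the same idea, with simplified stand-ins for BUILD_BUG_ON()
and the lock helper.

#include <stdio.h>

/* Simplified stand-in for Xen's BUILD_BUG_ON(): a negative array size
 * breaks the build as soon as cond is true. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

/* Stand-in for __nrspin_lock_irqsave(): returns the "saved" flags. */
static unsigned long lock_irqsave_helper(void)
{
    return 0x2UL;               /* pretend interrupts were enabled */
}

/* Same shape as nrspin_lock_irqsave(l, f) in the patch. */
#define lock_irqsave(f)                                         \
    ({                                                          \
        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
        ((f) = lock_irqsave_helper());                          \
    })

int main(void)
{
    unsigned long flags;

    lock_irqsave(flags);        /* builds: flags has the right width */
    /* With e.g. "unsigned int flags" the BUILD_BUG_ON would make the
     * compile fail instead of silently truncating the flags. */
    printf("flags=%#lx\n", flags);
    return 0;
}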