[xen staging] xen/rwlock: add check_lock() handling to rwlocks
commit e19bcb626f50a652fb1854a8b2f2c9c371687a11
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Fri Nov 6 10:48:07 2020 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Nov 6 10:48:07 2020 +0100

    xen/rwlock: add check_lock() handling to rwlocks

    Checking whether a lock is consistently used regarding interrupts on
    or off is beneficial for rwlocks, too. So add check_lock() calls to
    rwlock functions. For this purpose make check_lock() globally
    accessible.

    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/common/spinlock.c      |  3 +--
 xen/include/xen/rwlock.h   | 15 +++++++++++++++
 xen/include/xen/spinlock.h |  2 ++
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index f4eb50f030..b90981bb27 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -13,7 +13,7 @@
 
 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
 
-static void check_lock(union lock_debug *debug, bool try)
+void check_lock(union lock_debug *debug, bool try)
 {
     bool irq_safe = !local_irq_is_enabled();
 
@@ -108,7 +108,6 @@ void spin_debug_disable(void)
 
 #else /* CONFIG_DEBUG_LOCKS */
 
-#define check_lock(l, t) ((void)0)
 #define check_barrier(l) ((void)0)
 #define got_lock(l) ((void)0)
 #define rel_lock(l) ((void)0)
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 427664037a..0cc9167715 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -56,6 +56,7 @@ static inline int _read_trylock(rwlock_t *lock)
     u32 cnts;
 
     preempt_disable();
+    check_lock(&lock->lock.debug, true);
     cnts = atomic_read(&lock->cnts);
     if ( likely(_can_read_lock(cnts)) )
     {
@@ -87,7 +88,11 @@ static inline void _read_lock(rwlock_t *lock)
      * arch_lock_acquire_barrier().
      */
     if ( likely(_can_read_lock(cnts)) )
+    {
+        /* The slow path calls check_lock() via spin_lock(). */
+        check_lock(&lock->lock.debug, false);
         return;
+    }
 
     /* The slowpath will decrement the reader count, if necessary. */
     queue_read_lock_slowpath(lock);
@@ -162,7 +167,11 @@ static inline void _write_lock(rwlock_t *lock)
      * arch_lock_acquire_barrier().
      */
     if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
+    {
+        /* The slow path calls check_lock() via spin_lock(). */
+        check_lock(&lock->lock.debug, false);
         return;
+    }
 
     queue_write_lock_slowpath(lock);
     /*
@@ -197,6 +206,7 @@ static inline int _write_trylock(rwlock_t *lock)
     u32 cnts;
 
     preempt_disable();
+    check_lock(&lock->lock.debug, true);
     cnts = atomic_read(&lock->cnts);
     if ( unlikely(cnts) ||
          unlikely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) != 0) )
@@ -328,6 +338,11 @@ static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
         /* Drop the read lock because we don't need it anymore. */
         read_unlock(&percpu_rwlock->rwlock);
     }
+    else
+    {
+        /* All other paths have implicit check_lock() calls via read_lock(). */
+        check_lock(&percpu_rwlock->rwlock.lock.debug, false);
+    }
 }
 
 static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index ca13b600a0..9fa4e600c1 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -21,11 +21,13 @@ union lock_debug {
     };
 };
 #define _LOCK_DEBUG { LOCK_DEBUG_INITVAL }
+void check_lock(union lock_debug *debug, bool try);
 void spin_debug_enable(void);
 void spin_debug_disable(void);
 #else
 union lock_debug { };
 #define _LOCK_DEBUG { }
+#define check_lock(l, t) ((void)0)
 #define spin_debug_enable() ((void)0)
 #define spin_debug_disable() ((void)0)
 #endif
--
generated by git-patchbot for /home/xen/git/xen.git#staging
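For context, a minimal sketch of the kind of mistake the new check_lock() calls catch is shown below. The lock and both callers are hypothetical and not part of the patch; only the existing rwlock API (DEFINE_RWLOCK(), read_lock(), read_unlock()) is used. With CONFIG_DEBUG_LOCKS, check_lock() records on first acquisition whether a lock is taken with interrupts disabled (IRQ-safe) or enabled (IRQ-unsafe); a later acquisition of the opposite kind is reported and ends in BUG().

/*
 * Hypothetical illustration only -- example_lock and both callers are
 * made up for this sketch.
 */
#include <xen/rwlock.h>

static DEFINE_RWLOCK(example_lock);

/*
 * Runs with interrupts disabled (e.g. called from an interrupt handler):
 * check_lock() records example_lock as IRQ-safe on first acquisition.
 */
static void reader_in_irq_context(void)
{
    read_lock(&example_lock);
    /* ... read shared state ... */
    read_unlock(&example_lock);
}

/*
 * Runs with interrupts enabled: check_lock() now sees an IRQ-unsafe
 * acquisition of a lock previously recorded as IRQ-safe and BUG()s
 * (in CONFIG_DEBUG_LOCKS builds, once lock debugging is enabled).
 */
static void reader_in_process_context(void)
{
    read_lock(&example_lock);
    /* ... read shared state ... */
    read_unlock(&example_lock);
}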