
[Xen-changelog] [xen staging] rwlock: allow recursive read locking when already locked in write mode



commit 868a01021c6f429e4f47647edfe2e76b4a9c753d
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Wed Feb 26 10:53:03 2020 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Feb 26 10:53:03 2020 +0100

    rwlock: allow recursive read locking when already locked in write mode
    
    Allow a CPU already holding the lock in write mode to also lock it in
    read mode. There's no harm in allowing a read lock to be taken on an
    rwlock that's already owned by the caller (i.e. the CPU) in write
    mode. Allowing such accesses is required at least for the CPU maps
    use case.
    
    In order to do this, reserve 12 bits of the lock to store the ID of
    the CPU that owns it in write mode; this allows supporting up to 4096
    CPUs. Also reduce the write-lock mask to 2 bits: one to signal there
    are pending writers waiting on the lock and the other to signal the
    lock is owned in write mode.
    
    This reduces the maximum number of concurrent readers from 16777216
    to 262144, which I think should still be enough; otherwise the lock
    field can be expanded from 32 to 64 bits if all architectures support
    atomic operations on 64-bit integers.
    
    Fixes: 5872c83b42c608 ('smp: convert the cpu maps lock into a rw lock')
    Reported-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reported-by: Jürgen Groß <jgross@xxxxxxxx>
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Julien Grall <julien@xxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/common/rwlock.c      |  4 ++--
 xen/include/xen/rwlock.h | 52 +++++++++++++++++++++++++++++-------------------
 2 files changed, 34 insertions(+), 22 deletions(-)
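
To make the layout described in the commit message concrete, here is a
minimal, standalone sketch of the new lock-word format. The mask values
are copied from the xen/include/xen/rwlock.h hunk below; the demo program
itself and the CPU number it uses are purely illustrative and not part of
the patch.

/*
 * Illustrative only: mirrors the lock-word layout introduced by this
 * patch (mask values copied from xen/include/xen/rwlock.h below).
 *
 *  31 ............ 14 | 13 12 | 11 ............ 0
 *    reader count     | flags |  writer CPU ID
 */
#include <assert.h>
#include <stdio.h>

#define _QW_CPUMASK  0xfffU             /* writer CPU ID, up to 4096 CPUs */
#define _QW_SHIFT    12                 /* writer flags shift */
#define _QW_WAITING  (1U << _QW_SHIFT)  /* a writer is waiting */
#define _QW_LOCKED   (3U << _QW_SHIFT)  /* lock held in write mode */
#define _QW_WMASK    (3U << _QW_SHIFT)  /* writer mask */
#define _QR_SHIFT    14                 /* reader count shift */
#define _QR_BIAS     (1U << _QR_SHIFT)

int main(void)
{
    unsigned int cpu = 42;                 /* hypothetical CPU number */
    unsigned int cnts = _QW_LOCKED | cpu;  /* what _write_lock_val() stores */

    /* Both the write-mode state and the owning CPU can be recovered. */
    assert((cnts & _QW_WMASK) == _QW_LOCKED);
    assert((cnts & _QW_CPUMASK) == cpu);

    /* 18 bits remain for the reader count: 2^18 = 262144 readers. */
    printf("max concurrent readers: %u\n", 1U << (32 - _QR_SHIFT));

    /* A reader adds _QR_BIAS, leaving the writer bits untouched. */
    cnts += _QR_BIAS;
    assert((cnts & (_QW_WMASK | _QW_CPUMASK)) == (_QW_LOCKED | cpu));
    assert((cnts >> _QR_SHIFT) == 1);

    return 0;
}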

diff --git a/xen/common/rwlock.c b/xen/common/rwlock.c
index d568bbf6de..dadab372b5 100644
--- a/xen/common/rwlock.c
+++ b/xen/common/rwlock.c
@@ -69,7 +69,7 @@ void queue_write_lock_slowpath(rwlock_t *lock)
 
     /* Try to acquire the lock directly if no reader is present. */
     if ( !atomic_read(&lock->cnts) &&
-         (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0) )
+         (atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0) )
         goto unlock;
 
     /*
@@ -93,7 +93,7 @@ void queue_write_lock_slowpath(rwlock_t *lock)
         cnts = atomic_read(&lock->cnts);
         if ( (cnts == _QW_WAITING) &&
              (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
-                             _QW_LOCKED) == _QW_WAITING) )
+                             _write_lock_val()) == _QW_WAITING) )
             break;
 
         cpu_relax();
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 3dfea1ac2a..1c221dd0d9 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -2,6 +2,7 @@
 #define __RWLOCK_H__
 
 #include <xen/percpu.h>
+#include <xen/smp.h>
 #include <xen/spinlock.h>
 
 #include <asm/atomic.h>
@@ -20,21 +21,30 @@ typedef struct {
 #define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
 #define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
 
-/*
- * Writer states & reader shift and bias.
- *
- * Writer field is 8 bit to allow for potential optimisation, see
- * _write_unlock().
- */
-#define    _QW_WAITING  1               /* A writer is waiting     */
-#define    _QW_LOCKED   0xff            /* A writer holds the lock */
-#define    _QW_WMASK    0xff            /* Writer mask.*/
-#define    _QR_SHIFT    8               /* Reader count shift      */
+/* Writer states & reader shift and bias. */
+#define    _QW_CPUMASK  0xfffU             /* Writer CPU mask */
+#define    _QW_SHIFT    12                 /* Writer flags shift */
+#define    _QW_WAITING  (1U << _QW_SHIFT)  /* A writer is waiting */
+#define    _QW_LOCKED   (3U << _QW_SHIFT)  /* A writer holds the lock */
+#define    _QW_WMASK    (3U << _QW_SHIFT)  /* Writer mask */
+#define    _QR_SHIFT    14                 /* Reader count shift */
 #define    _QR_BIAS     (1U << _QR_SHIFT)
 
 void queue_read_lock_slowpath(rwlock_t *lock);
 void queue_write_lock_slowpath(rwlock_t *lock);
 
+static inline bool _is_write_locked_by_me(unsigned int cnts)
+{
+    BUILD_BUG_ON(_QW_CPUMASK < NR_CPUS);
+    return (cnts & _QW_WMASK) == _QW_LOCKED &&
+           (cnts & _QW_CPUMASK) == smp_processor_id();
+}
+
+static inline bool _can_read_lock(unsigned int cnts)
+{
+    return !(cnts & _QW_WMASK) || _is_write_locked_by_me(cnts);
+}
+
 /*
  * _read_trylock - try to acquire read lock of a queue rwlock.
  * @lock : Pointer to queue rwlock structure.
@@ -45,10 +55,10 @@ static inline int _read_trylock(rwlock_t *lock)
     u32 cnts;
 
     cnts = atomic_read(&lock->cnts);
-    if ( likely(!(cnts & _QW_WMASK)) )
+    if ( likely(_can_read_lock(cnts)) )
     {
         cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
-        if ( likely(!(cnts & _QW_WMASK)) )
+        if ( likely(_can_read_lock(cnts)) )
             return 1;
         atomic_sub(_QR_BIAS, &lock->cnts);
     }
@@ -64,7 +74,7 @@ static inline void _read_lock(rwlock_t *lock)
     u32 cnts;
 
     cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
-    if ( likely(!(cnts & _QW_WMASK)) )
+    if ( likely(_can_read_lock(cnts)) )
         return;
 
     /* The slowpath will decrement the reader count, if necessary. */
@@ -115,6 +125,11 @@ static inline int _rw_is_locked(rwlock_t *lock)
     return atomic_read(&lock->cnts);
 }
 
+static inline unsigned int _write_lock_val(void)
+{
+    return _QW_LOCKED | smp_processor_id();
+}
+
 /*
  * queue_write_lock - acquire write lock of a queue rwlock.
  * @lock : Pointer to queue rwlock structure.
@@ -122,7 +137,7 @@ static inline int _rw_is_locked(rwlock_t *lock)
 static inline void _write_lock(rwlock_t *lock)
 {
     /* Optimize for the unfair lock case where the fair flag is 0. */
-    if ( atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0 )
+    if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
         return;
 
     queue_write_lock_slowpath(lock);
@@ -157,16 +172,13 @@ static inline int _write_trylock(rwlock_t *lock)
     if ( unlikely(cnts) )
         return 0;
 
-    return likely(atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0);
+    return likely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0);
 }
 
 static inline void _write_unlock(rwlock_t *lock)
 {
-    /*
-     * If the writer field is atomic, it can be cleared directly.
-     * Otherwise, an atomic subtraction will be used to clear it.
-     */
-    atomic_sub(_QW_LOCKED, &lock->cnts);
+    ASSERT(_is_write_locked_by_me(atomic_read(&lock->cnts)));
+    atomic_and(~(_QW_CPUMASK | _QW_WMASK), &lock->cnts);
 }
 
 static inline void _write_unlock_irq(rwlock_t *lock)
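
As a usage illustration of what the change enables, consider the pattern
below, modelled on the CPU maps use case mentioned in the description.
The lock name and the two helper functions are made up for the example;
only DEFINE_RWLOCK() and the read_lock()/write_lock() style wrappers
around the primitives shown in the patch are assumed to exist.

#include <xen/rwlock.h>

static DEFINE_RWLOCK(example_lock);      /* hypothetical lock */

/* Readers take the lock in read mode. */
static void inspect_state(void)
{
    read_lock(&example_lock);
    /* ... walk the data protected by the lock ... */
    read_unlock(&example_lock);
}

/* An updater takes the lock in write mode ... */
static void update_state(void)
{
    write_lock(&example_lock);

    /*
     * ... and may (indirectly) call code that read-locks the same lock.
     * Previously the nested read_lock() would spin forever waiting for
     * the write lock this very CPU holds; with the owner's CPU ID stored
     * in the lock word, _can_read_lock() now allows the nested read to
     * proceed.
     */
    inspect_state();

    write_unlock(&example_lock);
}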
--
generated by git-patchbot for /home/xen/git/xen.git#staging
