
[Xen-changelog] [xen master] spinlock: move rwlock API and per-cpu rwlocks into their own files



commit 57fa2ce1a188caa222c7e524f2f5b6b34dec099a
Author:     Jennifer Herbert <jennifer.herbert@xxxxxxxxxx>
AuthorDate: Wed Feb 3 14:09:09 2016 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Feb 3 14:09:09 2016 +0100

    spinlock: move rwlock API and per-cpu rwlocks into their own files
    
    In preparation for a replacement read-write lock implementation, move
    the API and the per-cpu read-write locks into their own files.
    
    Signed-off-by: Jennifer Herbert <jennifer.herbert@xxxxxxxxxx>
    Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/mm/mem_sharing.c |   1 +
 xen/common/Makefile           |   1 +
 xen/common/rwlock.c           |  47 +++++++++++++
 xen/common/spinlock.c         |  45 -------------
 xen/include/asm-x86/mm.h      |   1 +
 xen/include/xen/grant_table.h |   1 +
 xen/include/xen/rwlock.h      | 150 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/sched.h       |   1 +
 xen/include/xen/spinlock.h    | 143 ----------------------------------------
 xen/xsm/flask/ss/services.c   |   1 +
 10 files changed, 203 insertions(+), 188 deletions(-)

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index a95e105..a522423 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -23,6 +23,7 @@
 #include <xen/types.h>
 #include <xen/domain_page.h>
 #include <xen/spinlock.h>
+#include <xen/rwlock.h>
 #include <xen/mm.h>
 #include <xen/grant_table.h>
 #include <xen/sched.h>
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 4df71ee..6e82b33 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -30,6 +30,7 @@ obj-y += rangeset.o
 obj-y += radix-tree.o
 obj-y += rbtree.o
 obj-y += rcupdate.o
+obj-y += rwlock.o
 obj-$(CONFIG_SCHED_ARINC653) += sched_arinc653.o
 obj-$(CONFIG_SCHED_CREDIT) += sched_credit.o
 obj-$(CONFIG_SCHED_CREDIT2) += sched_credit2.o
diff --git a/xen/common/rwlock.c b/xen/common/rwlock.c
new file mode 100644
index 0000000..410d4dc
--- /dev/null
+++ b/xen/common/rwlock.c
@@ -0,0 +1,47 @@
+#include <xen/rwlock.h>
+#include <xen/irq.h>
+
+static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);
+
+void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
+                percpu_rwlock_t *percpu_rwlock)
+{
+    unsigned int cpu;
+    cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);
+
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /*
+     * First take the write lock to protect against other writers or slow
+     * path readers.
+     */
+    write_lock(&percpu_rwlock->rwlock);
+
+    /* Now set the global variable so that readers start using read_lock. */
+    percpu_rwlock->writer_activating = 1;
+    smp_mb();
+
+    /* Using a per cpu cpumask is only safe if there is no nesting. */
+    ASSERT(!in_irq());
+    cpumask_copy(rwlock_readers, &cpu_online_map);
+
+    /* Check if there are any percpu readers in progress on this rwlock. */
+    for ( ; ; )
+    {
+        for_each_cpu(cpu, rwlock_readers)
+        {
+            /*
+             * Remove any percpu readers not contending on this rwlock
+             * from our check mask.
+             */
+            if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
+                __cpumask_clear_cpu(cpu, rwlock_readers);
+        }
+        /* Check if we've cleared all percpu readers from check mask. */
+        if ( cpumask_empty(rwlock_readers) )
+            break;
+        /* Give the coherency fabric a break. */
+        cpu_relax();
+    };
+}
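
The slow path above is writer-only: it takes the underlying rwlock, sets
writer_activating, and then spins until no online CPU's per-cpu pointer still
names this lock, i.e. until all fast-path readers have drained. A minimal
caller-side sketch using the percpu_write_lock()/percpu_write_unlock() wrappers
from xen/include/xen/rwlock.h below -- the names my_rwlock_owner, my_lock and
my_update() are hypothetical and not part of this patch:

    #include <xen/rwlock.h>

    /* Hypothetical per-cpu owner variable and statically initialised lock. */
    DEFINE_PERCPU_RWLOCK_GLOBAL(my_rwlock_owner);
    static DEFINE_PERCPU_RWLOCK_RESOURCE(my_lock, my_rwlock_owner);

    static void my_update(void)
    {
        /* Not usable from IRQ context: see the ASSERT(!in_irq()) above. */
        percpu_write_lock(my_rwlock_owner, &my_lock);
        /* ... exclusive access to the state guarded by my_lock ... */
        percpu_write_unlock(my_rwlock_owner, &my_lock);
    }
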
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index bab1f95..7b0cf6c 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -10,8 +10,6 @@
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
-static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);
-
 #ifndef NDEBUG
 
 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
@@ -494,49 +492,6 @@ int _rw_is_write_locked(rwlock_t *lock)
     return (lock->lock == RW_WRITE_FLAG); /* writer in critical section? */
 }
 
-void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
-                percpu_rwlock_t *percpu_rwlock)
-{
-    unsigned int cpu;
-    cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);
-
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    /* 
-     * First take the write lock to protect against other writers or slow 
-     * path readers.
-     */
-    write_lock(&percpu_rwlock->rwlock);
-
-    /* Now set the global variable so that readers start using read_lock. */
-    percpu_rwlock->writer_activating = 1;
-    smp_mb();
-
-    /* Using a per cpu cpumask is only safe if there is no nesting. */
-    ASSERT(!in_irq());
-    cpumask_copy(rwlock_readers, &cpu_online_map);
-
-    /* Check if there are any percpu readers in progress on this rwlock. */
-    for ( ; ; )
-    {
-        for_each_cpu(cpu, rwlock_readers)
-        {
-            /* 
-             * Remove any percpu readers not contending on this rwlock
-             * from our check mask.
-             */
-            if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
-                __cpumask_clear_cpu(cpu, rwlock_readers);
-        }
-        /* Check if we've cleared all percpu readers from check mask. */
-        if ( cpumask_empty(rwlock_readers) )
-            break;
-        /* Give the coherency fabric a break. */
-        cpu_relax();
-    };
-}
-
 #ifdef LOCK_PROFILE
 
 struct lock_profile_anc {
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 7598414..4560deb 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -5,6 +5,7 @@
 #include <xen/config.h>
 #include <xen/list.h>
 #include <xen/spinlock.h>
+#include <xen/rwlock.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/x86_emulate.h>
diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
index b4f064e..32b0ecd 100644
--- a/xen/include/xen/grant_table.h
+++ b/xen/include/xen/grant_table.h
@@ -23,6 +23,7 @@
 #ifndef __XEN_GRANT_TABLE_H__
 #define __XEN_GRANT_TABLE_H__
 
+#include <xen/rwlock.h>
 #include <public/grant_table.h>
 #include <asm/page.h>
 #include <asm/grant_table.h>
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
new file mode 100644
index 0000000..9d87783
--- /dev/null
+++ b/xen/include/xen/rwlock.h
@@ -0,0 +1,150 @@
+#ifndef __RWLOCK_H__
+#define __RWLOCK_H__
+
+#include <xen/spinlock.h>
+
+#define read_lock(l)                  _read_lock(l)
+#define read_lock_irq(l)              _read_lock_irq(l)
+#define read_lock_irqsave(l, f)                                 \
+    ({                                                          \
+        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
+        ((f) = _read_lock_irqsave(l));                          \
+    })
+
+#define read_unlock(l)                _read_unlock(l)
+#define read_unlock_irq(l)            _read_unlock_irq(l)
+#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
+#define read_trylock(l)               _read_trylock(l)
+
+#define write_lock(l)                 _write_lock(l)
+#define write_lock_irq(l)             _write_lock_irq(l)
+#define write_lock_irqsave(l, f)                                \
+    ({                                                          \
+        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
+        ((f) = _write_lock_irqsave(l));                         \
+    })
+#define write_trylock(l)              _write_trylock(l)
+
+#define write_unlock(l)               _write_unlock(l)
+#define write_unlock_irq(l)           _write_unlock_irq(l)
+#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
+
+#define rw_is_locked(l)               _rw_is_locked(l)
+#define rw_is_write_locked(l)         _rw_is_write_locked(l)
+
+
+typedef struct percpu_rwlock percpu_rwlock_t;
+
+struct percpu_rwlock {
+    rwlock_t            rwlock;
+    bool_t              writer_activating;
+#ifndef NDEBUG
+    percpu_rwlock_t     **percpu_owner;
+#endif
+};
+
+#ifndef NDEBUG
+#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0, owner }
+static inline void _percpu_rwlock_owner_check(percpu_rwlock_t **per_cpudata,
+                                         percpu_rwlock_t *percpu_rwlock)
+{
+    ASSERT(per_cpudata == percpu_rwlock->percpu_owner);
+}
+#else
+#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0 }
+#define _percpu_rwlock_owner_check(data, lock) ((void)0)
+#endif
+
+#define DEFINE_PERCPU_RWLOCK_RESOURCE(l, owner) \
+    percpu_rwlock_t l = PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner))
+#define percpu_rwlock_resource_init(l, owner) \
+    (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))
+
+static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
+                                         percpu_rwlock_t *percpu_rwlock)
+{
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /* We cannot support recursion on the same lock. */
+    ASSERT(this_cpu_ptr(per_cpudata) != percpu_rwlock);
+    /*
+     * Detect using a second percpu_rwlock_t simultaneously and fall back
+     * to standard read_lock.
+     */
+    if ( unlikely(this_cpu_ptr(per_cpudata) != NULL ) )
+    {
+        read_lock(&percpu_rwlock->rwlock);
+        return;
+    }
+
+    /* Indicate this cpu is reading. */
+    this_cpu_ptr(per_cpudata) = percpu_rwlock;
+    smp_mb();
+    /* Check if a writer is waiting. */
+    if ( unlikely(percpu_rwlock->writer_activating) )
+    {
+        /* Let the waiting writer know we aren't holding the lock. */
+        this_cpu_ptr(per_cpudata) = NULL;
+        /* Wait using the read lock to keep the lock fair. */
+        read_lock(&percpu_rwlock->rwlock);
+        /* Set the per CPU data again and continue. */
+        this_cpu_ptr(per_cpudata) = percpu_rwlock;
+        /* Drop the read lock because we don't need it anymore. */
+        read_unlock(&percpu_rwlock->rwlock);
+    }
+}
+
+static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
+                percpu_rwlock_t *percpu_rwlock)
+{
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /* Verify the read lock was taken for this lock */
+    ASSERT(this_cpu_ptr(per_cpudata) != NULL);
+    /*
+     * Detect using a second percpu_rwlock_t simultaneously and fall back
+     * to standard read_unlock.
+     */
+    if ( unlikely(this_cpu_ptr(per_cpudata) != percpu_rwlock ) )
+    {
+        read_unlock(&percpu_rwlock->rwlock);
+        return;
+    }
+    this_cpu_ptr(per_cpudata) = NULL;
+    smp_wmb();
+}
+
+/* Don't inline percpu write lock as it's a complex function. */
+void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
+                        percpu_rwlock_t *percpu_rwlock);
+
+static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
+                percpu_rwlock_t *percpu_rwlock)
+{
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    ASSERT(percpu_rwlock->writer_activating);
+    percpu_rwlock->writer_activating = 0;
+    write_unlock(&percpu_rwlock->rwlock);
+}
+
+#define percpu_rw_is_write_locked(l)         _rw_is_write_locked(&((l)->rwlock))
+
+#define percpu_read_lock(percpu, lock) \
+    _percpu_read_lock(&get_per_cpu_var(percpu), lock)
+#define percpu_read_unlock(percpu, lock) \
+    _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
+#define percpu_write_lock(percpu, lock) \
+    _percpu_write_lock(&get_per_cpu_var(percpu), lock)
+#define percpu_write_unlock(percpu, lock) \
+    _percpu_write_unlock(&get_per_cpu_var(percpu), lock)
+
+#define DEFINE_PERCPU_RWLOCK_GLOBAL(name) DEFINE_PER_CPU(percpu_rwlock_t *, \
+                                                         name)
+#define DECLARE_PERCPU_RWLOCK_GLOBAL(name) DECLARE_PER_CPU(percpu_rwlock_t *, \
+                                                           name)
+
+#endif /* __RWLOCK_H__ */
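
Taken together, the intended usage pattern of the header above is: declare one
per-cpu owner pointer per class of lock with DEFINE_PERCPU_RWLOCK_GLOBAL() (and
DECLARE_PERCPU_RWLOCK_GLOBAL() in a header if other files need it), initialise
each protected object's percpu_rwlock_t against that owner, and always pass the
owner/lock pair to the lock and unlock wrappers. A minimal sketch; the names
obj_percpu_rwlock, struct my_object and the accessors are hypothetical, not
part of this patch:

    #include <xen/rwlock.h>
    #include <xen/xmalloc.h>

    DEFINE_PERCPU_RWLOCK_GLOBAL(obj_percpu_rwlock);

    struct my_object {
        percpu_rwlock_t lock;
        unsigned int state;
    };

    static struct my_object *my_object_create(void)
    {
        struct my_object *obj = xzalloc(struct my_object);

        /* Bind the lock to its owner; checked by ASSERT() in debug builds. */
        if ( obj )
            percpu_rwlock_resource_init(&obj->lock, obj_percpu_rwlock);
        return obj;
    }

    static unsigned int my_object_read(struct my_object *obj)
    {
        unsigned int val;

        /* Fast path only touches this CPU's pointer plus a barrier. */
        percpu_read_lock(obj_percpu_rwlock, &obj->lock);
        val = obj->state;
        percpu_read_unlock(obj_percpu_rwlock, &obj->lock);
        return val;
    }

    static void my_object_write(struct my_object *obj, unsigned int val)
    {
        /* Slow path: waits for per-cpu readers to drain (xen/common/rwlock.c). */
        percpu_write_lock(obj_percpu_rwlock, &obj->lock);
        obj->state = val;
        percpu_write_unlock(obj_percpu_rwlock, &obj->lock);
    }
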
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 5870745..b47a3fe 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -4,6 +4,7 @@
 
 #include <xen/types.h>
 #include <xen/spinlock.h>
+#include <xen/rwlock.h>
 #include <xen/shared.h>
 #include <xen/timer.h>
 #include <xen/rangeset.h>
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 22c4fc2..765db51 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -236,147 +236,4 @@ int _rw_is_write_locked(rwlock_t *lock);
 #define spin_lock_recursive(l)        _spin_lock_recursive(l)
 #define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
 
-#define read_lock(l)                  _read_lock(l)
-#define read_lock_irq(l)              _read_lock_irq(l)
-#define read_lock_irqsave(l, f)                                 \
-    ({                                                          \
-        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
-        ((f) = _read_lock_irqsave(l));                          \
-    })
-
-#define read_unlock(l)                _read_unlock(l)
-#define read_unlock_irq(l)            _read_unlock_irq(l)
-#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
-#define read_trylock(l)               _read_trylock(l)
-
-#define write_lock(l)                 _write_lock(l)
-#define write_lock_irq(l)             _write_lock_irq(l)
-#define write_lock_irqsave(l, f)                                \
-    ({                                                          \
-        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
-        ((f) = _write_lock_irqsave(l));                         \
-    })
-#define write_trylock(l)              _write_trylock(l)
-
-#define write_unlock(l)               _write_unlock(l)
-#define write_unlock_irq(l)           _write_unlock_irq(l)
-#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
-
-#define rw_is_locked(l)               _rw_is_locked(l)
-#define rw_is_write_locked(l)         _rw_is_write_locked(l)
-
-typedef struct percpu_rwlock percpu_rwlock_t;
-
-struct percpu_rwlock {
-    rwlock_t            rwlock;
-    bool_t              writer_activating;
-#ifndef NDEBUG
-    percpu_rwlock_t     **percpu_owner;
-#endif
-};
-
-#ifndef NDEBUG
-#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0, owner }
-static inline void _percpu_rwlock_owner_check(percpu_rwlock_t **per_cpudata,
-                                         percpu_rwlock_t *percpu_rwlock)
-{
-    ASSERT(per_cpudata == percpu_rwlock->percpu_owner);
-}
-#else
-#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0 }
-#define _percpu_rwlock_owner_check(data, lock) ((void)0)
-#endif
-
-#define DEFINE_PERCPU_RWLOCK_RESOURCE(l, owner) \
-    percpu_rwlock_t l = PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner))
-#define percpu_rwlock_resource_init(l, owner) \
-    (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))
-
-static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
-                                         percpu_rwlock_t *percpu_rwlock)
-{
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    /* We cannot support recursion on the same lock. */
-    ASSERT(this_cpu_ptr(per_cpudata) != percpu_rwlock);
-    /* 
-     * Detect using a second percpu_rwlock_t simultaneously and fall back
-     * to standard read_lock.
-     */
-    if ( unlikely(this_cpu_ptr(per_cpudata) != NULL ) )
-    {
-        read_lock(&percpu_rwlock->rwlock);
-        return;
-    }
-
-    /* Indicate this cpu is reading. */
-    this_cpu_ptr(per_cpudata) = percpu_rwlock;
-    smp_mb();
-    /* Check if a writer is waiting. */
-    if ( unlikely(percpu_rwlock->writer_activating) )
-    {
-        /* Let the waiting writer know we aren't holding the lock. */
-        this_cpu_ptr(per_cpudata) = NULL;
-        /* Wait using the read lock to keep the lock fair. */
-        read_lock(&percpu_rwlock->rwlock);
-        /* Set the per CPU data again and continue. */
-        this_cpu_ptr(per_cpudata) = percpu_rwlock;
-        /* Drop the read lock because we don't need it anymore. */
-        read_unlock(&percpu_rwlock->rwlock);
-    }
-}
-
-static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
-                percpu_rwlock_t *percpu_rwlock)
-{
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    /* Verify the read lock was taken for this lock */
-    ASSERT(this_cpu_ptr(per_cpudata) != NULL);
-    /* 
-     * Detect using a second percpu_rwlock_t simultaneously and fall back
-     * to standard read_unlock.
-     */
-    if ( unlikely(this_cpu_ptr(per_cpudata) != percpu_rwlock ) )
-    {
-        read_unlock(&percpu_rwlock->rwlock);
-        return;
-    }
-    this_cpu_ptr(per_cpudata) = NULL;
-    smp_wmb();
-}
-
-/* Don't inline percpu write lock as it's a complex function. */
-void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
-                        percpu_rwlock_t *percpu_rwlock);
-
-static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
-                percpu_rwlock_t *percpu_rwlock)
-{
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    ASSERT(percpu_rwlock->writer_activating);
-    percpu_rwlock->writer_activating = 0;
-    write_unlock(&percpu_rwlock->rwlock);
-}
-
-#define percpu_rw_is_write_locked(l)         _rw_is_write_locked(&((l)->rwlock))
-
-#define percpu_read_lock(percpu, lock) \
-    _percpu_read_lock(&get_per_cpu_var(percpu), lock)
-#define percpu_read_unlock(percpu, lock) \
-    _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
-#define percpu_write_lock(percpu, lock) \
-    _percpu_write_lock(&get_per_cpu_var(percpu), lock)
-#define percpu_write_unlock(percpu, lock) \
-    _percpu_write_unlock(&get_per_cpu_var(percpu), lock)
-
-#define DEFINE_PERCPU_RWLOCK_GLOBAL(name) DEFINE_PER_CPU(percpu_rwlock_t *, \
-                                                         name)
-#define DECLARE_PERCPU_RWLOCK_GLOBAL(name) DECLARE_PER_CPU(percpu_rwlock_t *, \
-                                                         name)
-
 #endif /* __SPINLOCK_H__ */
diff --git a/xen/xsm/flask/ss/services.c b/xen/xsm/flask/ss/services.c
index f31d7d7..9da358b 100644
--- a/xen/xsm/flask/ss/services.c
+++ b/xen/xsm/flask/ss/services.c
@@ -40,6 +40,7 @@
 #include <xen/xmalloc.h>
 #include <xen/string.h>
 #include <xen/spinlock.h>
+#include <xen/rwlock.h>
 #include <xen/errno.h>
 #include "flask.h"
 #include "avc.h"
--
generated by git-patchbot for /home/xen/git/xen.git#master
