
[Xen-devel] [PATCH V2 07/46] xen: arm64: spinlocks



Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---
v2: no change, but these need to be revisited to take account of the interaction
with SEV/WFE etc. The generic code may need to be reworked in order to make best
use of WFE (on 32-bit ARM too); a hypothetical sketch follows.
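
To illustrate the SEV/WFE interaction mentioned above, here is a purely
hypothetical sketch (not part of this patch) of what a WFE-based 32-bit ARM
acquire loop could look like, pairing with the dsb_sev() issued by
_raw_spin_unlock() in the arm32 header below:

/* Hypothetical sketch only, not part of this patch: a WFE-based acquire
 * loop that sleeps until the unlocker's dsb_sev() wakes it. */
static always_inline void _raw_spin_lock_wfe(raw_spinlock_t *lock)
{
    unsigned long tmp;

    __asm__ __volatile__(
"1: ldrex   %0, [%1]\n"         /* read the current lock value */
"   teq     %0, #0\n"           /* already held? */
"   wfene\n"                    /* if so, sleep until an event (SEV) */
"   strexeq %0, %2, [%1]\n"     /* otherwise try to claim the lock */
"   teqeq   %0, #0\n"
"   bne     1b"                 /* held or strex failed: try again */
    : "=&r" (tmp)
    : "r" (&lock->lock), "r" (1)
    : "cc");

    smp_mb();
}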
---
 xen/include/asm-arm/arm32/spinlock.h |  141 ++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/spinlock.h |  125 ++++++++++++++++++++++++++++++
 xen/include/asm-arm/spinlock.h       |  135 ++------------------------------
 3 files changed, 273 insertions(+), 128 deletions(-)
 create mode 100644 xen/include/asm-arm/arm32/spinlock.h
 create mode 100644 xen/include/asm-arm/arm64/spinlock.h

diff --git a/xen/include/asm-arm/arm32/spinlock.h b/xen/include/asm-arm/arm32/spinlock.h
new file mode 100644
index 0000000..a7bcdbf
--- /dev/null
+++ b/xen/include/asm-arm/arm32/spinlock.h
@@ -0,0 +1,141 @@
+#ifndef __ASM_ARM32_SPINLOCK_H
+#define __ASM_ARM32_SPINLOCK_H
+
+static inline void dsb_sev(void)
+{
+    __asm__ __volatile__ (
+        "dsb\n"
+        "sev\n"
+        );
+}
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define _RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+#define _raw_spin_is_locked(x)          ((x)->lock != 0)
+
+static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+    ASSERT(_raw_spin_is_locked(lock));
+
+    smp_mb();
+
+    __asm__ __volatile__(
+"   str     %1, [%0]\n"
+    :
+    : "r" (&lock->lock), "r" (0)
+    : "cc");
+
+    dsb_sev();
+}
+
+static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+    unsigned long tmp;
+
+    __asm__ __volatile__(
+"   ldrex   %0, [%1]\n"
+"   teq     %0, #0\n"
+"   strexeq %0, %2, [%1]"
+    : "=&r" (tmp)
+    : "r" (&lock->lock), "r" (1)
+    : "cc");
+
+    if (tmp == 0) {
+        smp_mb();
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define _RAW_RW_LOCK_UNLOCKED { 0 }
+
+static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
+{
+    unsigned long tmp, tmp2 = 1;
+
+    __asm__ __volatile__(
+"1: ldrex   %0, [%2]\n"
+"   adds    %0, %0, #1\n"
+"   strexpl %1, %0, [%2]\n"
+    : "=&r" (tmp), "+r" (tmp2)
+    : "r" (&rw->lock)
+    : "cc");
+
+    smp_mb();
+    return tmp2 == 0;
+}
+
+static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
+{
+    unsigned long tmp;
+
+    __asm__ __volatile__(
+"1: ldrex   %0, [%1]\n"
+"   teq     %0, #0\n"
+"   strexeq %0, %2, [%1]"
+    : "=&r" (tmp)
+    : "r" (&rw->lock), "r" (0x80000000)
+    : "cc");
+
+    if (tmp == 0) {
+        smp_mb();
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+static inline void _raw_read_unlock(raw_rwlock_t *rw)
+{
+    unsigned long tmp, tmp2;
+
+    smp_mb();
+
+    __asm__ __volatile__(
+"1: ldrex   %0, [%2]\n"
+"   sub     %0, %0, #1\n"
+"   strex   %1, %0, [%2]\n"
+"   teq     %1, #0\n"
+"   bne     1b"
+    : "=&r" (tmp), "=&r" (tmp2)
+    : "r" (&rw->lock)
+    : "cc");
+
+    if (tmp == 0)
+        dsb_sev();
+}
+
+static inline void _raw_write_unlock(raw_rwlock_t *rw)
+{
+    smp_mb();
+
+    __asm__ __volatile__(
+    "str    %1, [%0]\n"
+    :
+    : "r" (&rw->lock), "r" (0)
+    : "cc");
+
+    dsb_sev();
+}
+
+#define _raw_rw_is_locked(x) ((x)->lock != 0)
+#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000)
+
+#endif /* __ASM_ARM32_SPINLOCK_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
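
For context (not part of this patch): the generic layer in
xen/common/spinlock.c is what builds spin_lock() on top of these _raw_
primitives, currently by spinning on cpu_relax(). A simplified, illustrative
sketch of that acquire path (lock profiling and debug checks omitted, and not
the verbatim Xen implementation; it assumes the usual spinlock_t wrapper with
a raw_spinlock_t "raw" member) is:

void example_spin_lock(spinlock_t *lock)
{
    /* Keep trying to take the raw lock; while it is held, spin politely. */
    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
        while ( likely(_raw_spin_is_locked(&lock->raw)) )
            cpu_relax();
}

This cpu_relax() spin is exactly the place the v2 note above suggests
reworking, so that the ARM backends can wait with WFE instead of burning
cycles.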
diff --git a/xen/include/asm-arm/arm64/spinlock.h b/xen/include/asm-arm/arm64/spinlock.h
new file mode 100644
index 0000000..52ad688
--- /dev/null
+++ b/xen/include/asm-arm/arm64/spinlock.h
@@ -0,0 +1,125 @@
+/*
+ * Derived from Linux arm64 spinlock.h which is:
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM64_SPINLOCK_H
+#define __ASM_ARM64_SPINLOCK_H
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define _RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+#define _raw_spin_is_locked(x)          ((x)->lock != 0)
+
+static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+    ASSERT(_raw_spin_is_locked(lock));
+
+    asm volatile(
+        "       stlr    %w1, [%0]\n"
+        : : "r" (&lock->lock), "r" (0) : "memory");
+}
+
+static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+    unsigned int tmp;
+
+    asm volatile(
+        "       ldaxr   %w0, [%1]\n"
+        "       cbnz    %w0, 1f\n"
+        "       stxr    %w0, %w2, [%1]\n"
+        "1:\n"
+        : "=&r" (tmp)
+        : "r" (&lock->lock), "r" (1)
+        : "memory");
+
+    return !tmp;
+}
+
+typedef struct {
+    volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define _RAW_RW_LOCK_UNLOCKED { 0 }
+
+static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
+{
+    unsigned int tmp, tmp2 = 1;
+
+    asm volatile(
+        "       ldaxr   %w0, [%2]\n"
+        "       add     %w0, %w0, #1\n"
+        "       tbnz    %w0, #31, 1f\n"
+        "       stxr    %w1, %w0, [%2]\n"
+        "1:\n"
+        : "=&r" (tmp), "+r" (tmp2)
+        : "r" (&rw->lock)
+        : "memory");
+
+    return !tmp2;
+}
+
+static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
+{
+    unsigned int tmp;
+
+    asm volatile(
+        "       ldaxr   %w0, [%1]\n"
+        "       cbnz    %w0, 1f\n"
+        "       stxr    %w0, %w2, [%1]\n"
+        "1:\n"
+        : "=&r" (tmp)
+        : "r" (&rw->lock), "r" (0x80000000)
+        : "memory");
+
+    return !tmp;
+}
+
+static inline void _raw_read_unlock(raw_rwlock_t *rw)
+{
+    unsigned int tmp, tmp2;
+
+    asm volatile(
+        "1:     ldxr    %w0, [%2]\n"
+        "       sub     %w0, %w0, #1\n"
+        "       stlxr   %w1, %w0, [%2]\n"
+        "       cbnz    %w1, 1b\n"
+        : "=&r" (tmp), "=&r" (tmp2)
+        : "r" (&rw->lock)
+        : "memory");
+}
+
+static inline void _raw_write_unlock(raw_rwlock_t *rw)
+{
+    asm volatile(
+        "       stlr    %w1, [%0]\n"
+        : : "r" (&rw->lock), "r" (0) : "memory");
+}
+
+#define _raw_rw_is_locked(x) ((x)->lock != 0)
+#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000)
+
+#endif /* __ASM_ARM64_SPINLOCK_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
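
Unlike the arm32 version, the arm64 code relies on the load-acquire/
store-release instructions (ldaxr/stlr) for ordering, so no explicit smp_mb()
or dsb_sev() is needed in these paths; the unlocking store also clears a
waiter's exclusive monitor, which generates the wake-up event for WFE. For
illustration only (not part of this patch), a native WFE-based acquire loop
in the style of the Linux arm64 code this file is derived from might look
like:

/* Hypothetical sketch only: an arm64 WFE-based acquire loop.  The stlr in
 * _raw_spin_unlock() clears the waiter's exclusive monitor and so wakes
 * the wfe below. */
static always_inline void _raw_spin_lock_wfe(raw_spinlock_t *lock)
{
    unsigned int tmp;

    asm volatile(
        "       sevl\n"                   /* arm the first wfe */
        "1:     wfe\n"                    /* wait for an unlock event */
        "2:     ldaxr   %w0, [%1]\n"      /* load-acquire the lock value */
        "       cbnz    %w0, 1b\n"        /* still held: back to sleep */
        "       stxr    %w0, %w2, [%1]\n" /* try to claim the lock */
        "       cbnz    %w0, 2b\n"        /* lost the race: retry */
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "memory");
}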
diff --git a/xen/include/asm-arm/spinlock.h b/xen/include/asm-arm/spinlock.h
index b1825c9..d753210 100644
--- a/xen/include/asm-arm/spinlock.h
+++ b/xen/include/asm-arm/spinlock.h
@@ -4,134 +4,13 @@
 #include <xen/config.h>
 #include <xen/lib.h>
 
-static inline void dsb_sev(void)
-{
-    __asm__ __volatile__ (
-        "dsb\n"
-        "sev\n"
-        );
-}
-
-typedef struct {
-    volatile unsigned int lock;
-} raw_spinlock_t;
-
-#define _RAW_SPIN_LOCK_UNLOCKED { 0 }
-
-#define _raw_spin_is_locked(x)          ((x)->lock != 0)
-
-static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
-{
-    ASSERT(_raw_spin_is_locked(lock));
-
-    smp_mb();
-
-    __asm__ __volatile__(
-"   str     %1, [%0]\n"
-    :
-    : "r" (&lock->lock), "r" (0)
-    : "cc");
-
-    dsb_sev();
-}
-
-static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
-{
-    unsigned long tmp;
-
-    __asm__ __volatile__(
-"   ldrex   %0, [%1]\n"
-"   teq     %0, #0\n"
-"   strexeq %0, %2, [%1]"
-    : "=&r" (tmp)
-    : "r" (&lock->lock), "r" (1)
-    : "cc");
-
-    if (tmp == 0) {
-        smp_mb();
-        return 1;
-    } else {
-        return 0;
-    }
-}
-
-typedef struct {
-    volatile unsigned int lock;
-} raw_rwlock_t;
-
-#define _RAW_RW_LOCK_UNLOCKED { 0 }
-
-static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
-{
-    unsigned long tmp, tmp2 = 1;
-
-    __asm__ __volatile__(
-"1: ldrex   %0, [%2]\n"
-"   adds    %0, %0, #1\n"
-"   strexpl %1, %0, [%2]\n"
-    : "=&r" (tmp), "+r" (tmp2)
-    : "r" (&rw->lock)
-    : "cc");
-
-    smp_mb();
-    return tmp2 == 0;
-}
-
-static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
-{
-    unsigned long tmp;
-
-    __asm__ __volatile__(
-"1: ldrex   %0, [%1]\n"
-"   teq     %0, #0\n"
-"   strexeq %0, %2, [%1]"
-    : "=&r" (tmp)
-    : "r" (&rw->lock), "r" (0x80000000)
-    : "cc");
-
-    if (tmp == 0) {
-        smp_mb();
-        return 1;
-    } else {
-        return 0;
-    }
-}
-
-static inline void _raw_read_unlock(raw_rwlock_t *rw)
-{
-    unsigned long tmp, tmp2;
-
-    smp_mb();
-
-    __asm__ __volatile__(
-"1: ldrex   %0, [%2]\n"
-"   sub     %0, %0, #1\n"
-"   strex   %1, %0, [%2]\n"
-"   teq     %1, #0\n"
-"   bne     1b"
-    : "=&r" (tmp), "=&r" (tmp2)
-    : "r" (&rw->lock)
-    : "cc");
-
-    if (tmp == 0)
-        dsb_sev();
-}
-
-static inline void _raw_write_unlock(raw_rwlock_t *rw)
-{
-    smp_mb();
-
-    __asm__ __volatile__(
-    "str    %1, [%0]\n"
-    :
-    : "r" (&rw->lock), "r" (0)
-    : "cc");
-
-    dsb_sev();
-}
-
-#define _raw_rw_is_locked(x) ((x)->lock != 0)
-#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000)
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/spinlock.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/spinlock.h>
+#else
+# error "unknown ARM variant"
+#endif
 
 #endif /* __ASM_SPINLOCK_H */
 /*
-- 
1.7.2.5

