
[Xen-devel] [PATCH v2 16/17] xen: arm: refactor xchg and cmpxchg into their own headers

Since these functions are taken from Linux, this makes it easier to compare
them against the Linux cmpxchg.h headers (which were split out from Linux's
system.h a while back).

Since these functions are from Linux, the intention is to use Linux coding
style; therefore include a suitable emacs magic block.

For this reason also fix up the indentation in the 32-bit version to use hard
tabs while moving it. The 64-bit version was already correct.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
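A note for reviewers, not part of the commit message: the code is moved
verbatim apart from the indentation fix-up described above. As a refresher, a
minimal sketch of how callers use these macros; the variable and the values
are made up for illustration:

	static unsigned int flag;

	void example(void)
	{
		unsigned int old;

		/* Atomically store 1 in flag, returning the prior value. */
		old = xchg(&flag, 1);

		/* Atomically set flag to 2 only if it still holds 1.
		 * cmpxchg returns the value observed in memory, so the swap
		 * succeeded iff the return value equals the "old" argument. */
		if (cmpxchg(&flag, 1, 2) == 1) {
			/* We performed the 1 -> 2 transition. */
		}
	}
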
 xen/include/asm-arm/arm32/cmpxchg.h |  146 ++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm32/system.h  |  135 +---------------------------
 xen/include/asm-arm/arm64/cmpxchg.h |  167 +++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/system.h  |  155 +-------------------------------
 4 files changed, 315 insertions(+), 288 deletions(-)
 create mode 100644 xen/include/asm-arm/arm32/cmpxchg.h
 create mode 100644 xen/include/asm-arm/arm64/cmpxchg.h
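
For reference while reviewing the assembly: the ldrex/strex (arm32) and
ldxr/stxr (arm64) sequences implement the usual load-linked/store-conditional
retry loop, i.e. roughly the semantics of the GCC __atomic builtins in this
sketch (reference only, not proposed for the tree):

	unsigned long xchg_ref(unsigned long *ptr, unsigned long x)
	{
		/* Swap x into *ptr, returning the previous contents. */
		return __atomic_exchange_n(ptr, x, __ATOMIC_SEQ_CST);
	}

	unsigned long cmpxchg_ref(unsigned long *ptr, unsigned long old,
				  unsigned long new)
	{
		/* On failure the builtin writes the observed value back into
		 * "old"; on success "old" already equals it, so returning
		 * "old" mirrors cmpxchg's return-what-was-in-memory rule. */
		__atomic_compare_exchange_n(ptr, &old, new, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return old;
	}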

diff --git a/xen/include/asm-arm/arm32/cmpxchg.h b/xen/include/asm-arm/arm32/cmpxchg.h
new file mode 100644
index 0000000..70c6090
--- /dev/null
+++ b/xen/include/asm-arm/arm32/cmpxchg.h
@@ -0,0 +1,146 @@
+#ifndef __ASM_ARM32_CMPXCHG_H
+#define __ASM_ARM32_CMPXCHG_H
+
+extern void __bad_xchg(volatile void *, int);
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+       unsigned long ret;
+       unsigned int tmp;
+
+       smp_mb();
+
+       switch (size) {
+       case 1:
+               asm volatile("@ __xchg1\n"
+               "1:     ldrexb  %0, [%3]\n"
+               "       strexb  %1, %2, [%3]\n"
+               "       teq     %1, #0\n"
+               "       bne     1b"
+                       : "=&r" (ret), "=&r" (tmp)
+                       : "r" (x), "r" (ptr)
+                       : "memory", "cc");
+               break;
+       case 4:
+               asm volatile("@ __xchg4\n"
+               "1:     ldrex   %0, [%3]\n"
+               "       strex   %1, %2, [%3]\n"
+               "       teq     %1, #0\n"
+               "       bne     1b"
+                       : "=&r" (ret), "=&r" (tmp)
+                       : "r" (x), "r" (ptr)
+                       : "memory", "cc");
+               break;
+       default:
+               __bad_xchg(ptr, size), ret = 0;
+               break;
+       }
+       smp_mb();
+
+       return ret;
+}
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+static always_inline unsigned long __cmpxchg(
+    volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+       unsigned long oldval, res;
+
+       switch (size) {
+       case 1:
+               do {
+                       asm volatile("@ __cmpxchg1\n"
+                       "       ldrexb  %1, [%2]\n"
+                       "       mov     %0, #0\n"
+                       "       teq     %1, %3\n"
+                       "       strexbeq %0, %4, [%2]\n"
+                               : "=&r" (res), "=&r" (oldval)
+                               : "r" (ptr), "Ir" (old), "r" (new)
+                               : "memory", "cc");
+               } while (res);
+               break;
+       case 2:
+               do {
+                       asm volatile("@ __cmpxchg2\n"
+                       "       ldrexh  %1, [%2]\n"
+                       "       mov     %0, #0\n"
+                       "       teq     %1, %3\n"
+                       "       strexheq %0, %4, [%2]\n"
+                               : "=&r" (res), "=&r" (oldval)
+                               : "r" (ptr), "Ir" (old), "r" (new)
+                               : "memory", "cc");
+               } while (res);
+               break;
+       case 4:
+               do {
+                       asm volatile("@ __cmpxchg4\n"
+                       "       ldrex   %1, [%2]\n"
+                       "       mov     %0, #0\n"
+                       "       teq     %1, %3\n"
+                       "       strexeq %0, %4, [%2]\n"
+                               : "=&r" (res), "=&r" (oldval)
+                               : "r" (ptr), "Ir" (old), "r" (new)
+                               : "memory", "cc");
+               } while (res);
+               break;
+#if 0
+       case 8:
+               do {
+                       asm volatile("@ __cmpxchg8\n"
+                       "       ldrexd  %1, [%2]\n"
+                       "       mov     %0, #0\n"
+                       "       teq     %1, %3\n"
+                       "       strexdeq %0, %4, [%2]\n"
+                               : "=&r" (res), "=&r" (oldval)
+                               : "r" (ptr), "Ir" (old), "r" (new)
+                               : "memory", "cc");
+               } while (res);
+               break;
+#endif
+       default:
+               __bad_cmpxchg(ptr, size);
+               oldval = 0;
+       }
+
+       return oldval;
+}
+
+static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+                                        unsigned long new, int size)
+{
+       unsigned long ret;
+
+       smp_mb();
+       ret = __cmpxchg(ptr, old, new, size);
+       smp_mb();
+
+       return ret;
+}
+
+#define cmpxchg(ptr,o,n)                                               \
+       ((__typeof__(*(ptr)))__cmpxchg_mb((ptr),                        \
+                                         (unsigned long)(o),           \
+                                         (unsigned long)(n),           \
+                                         sizeof(*(ptr))))
+
+#define cmpxchg_local(ptr,o,n)                                         \
+       ((__typeof__(*(ptr)))__cmpxchg((ptr),                           \
+                                      (unsigned long)(o),              \
+                                      (unsigned long)(n),              \
+                                      sizeof(*(ptr))))
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 8
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h
index dfaa3b6..b47b942 100644
--- a/xen/include/asm-arm/arm32/system.h
+++ b/xen/include/asm-arm/arm32/system.h
@@ -2,140 +2,7 @@
 #ifndef __ASM_ARM32_SYSTEM_H
 #define __ASM_ARM32_SYSTEM_H
 
-extern void __bad_xchg(volatile void *, int);
-
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-{
-        unsigned long ret;
-        unsigned int tmp;
-
-        smp_mb();
-
-        switch (size) {
-        case 1:
-                asm volatile("@ __xchg1\n"
-                "1:     ldrexb  %0, [%3]\n"
-                "       strexb  %1, %2, [%3]\n"
-                "       teq     %1, #0\n"
-                "       bne     1b"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
-                break;
-        case 4:
-                asm volatile("@ __xchg4\n"
-                "1:     ldrex   %0, [%3]\n"
-                "       strex   %1, %2, [%3]\n"
-                "       teq     %1, #0\n"
-                "       bne     1b"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
-                break;
-        default:
-                __bad_xchg(ptr, size), ret = 0;
-                break;
-        }
-        smp_mb();
-
-        return ret;
-}
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-
-extern void __bad_cmpxchg(volatile void *ptr, int size);
-
-static always_inline unsigned long __cmpxchg(
-    volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
-    unsigned long /*long*/ oldval, res;
-
-    switch (size) {
-    case 1:
-        do {
-            asm volatile("@ __cmpxchg1\n"
-                         "       ldrexb  %1, [%2]\n"
-                         "       mov     %0, #0\n"
-                         "       teq     %1, %3\n"
-                         "       strexbeq %0, %4, [%2]\n"
-                         : "=&r" (res), "=&r" (oldval)
-                         : "r" (ptr), "Ir" (old), "r" (new)
-                         : "memory", "cc");
-        } while (res);
-        break;
-    case 2:
-        do {
-            asm volatile("@ __cmpxchg2\n"
-                         "       ldrexh  %1, [%2]\n"
-                         "       mov     %0, #0\n"
-                         "       teq     %1, %3\n"
-                         "       strexheq %0, %4, [%2]\n"
-                         : "=&r" (res), "=&r" (oldval)
-                         : "r" (ptr), "Ir" (old), "r" (new)
-                         : "memory", "cc");
-        } while (res);
-        break;
-    case 4:
-        do {
-            asm volatile("@ __cmpxchg4\n"
-                         "       ldrex   %1, [%2]\n"
-                         "       mov     %0, #0\n"
-                         "       teq     %1, %3\n"
-                         "       strexeq %0, %4, [%2]\n"
-                         : "=&r" (res), "=&r" (oldval)
-                         : "r" (ptr), "Ir" (old), "r" (new)
-                         : "memory", "cc");
-        } while (res);
-        break;
-#if 0
-    case 8:
-        do {
-            asm volatile("@ __cmpxchg8\n"
-                         "       ldrexd   %1, [%2]\n"
-                         "       mov      %0, #0\n"
-                         "       teq      %1, %3\n"
-                         "       strexdeq %0, %4, [%2]\n"
-                         : "=&r" (res), "=&r" (oldval)
-                         : "r" (ptr), "Ir" (old), "r" (new)
-                         : "memory", "cc");
-        } while (res);
-        break;
-#endif
-    default:
-        __bad_cmpxchg(ptr, size);
-        oldval = 0;
-    }
-
-    return oldval;
-}
-
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-                                        unsigned long new, int size)
-{
-       unsigned long ret;
-
-       smp_mb();
-       ret = __cmpxchg(ptr, old, new, size);
-       smp_mb();
-
-       return ret;
-}
-
-#define cmpxchg(ptr,o,n)                                               \
-       ((__typeof__(*(ptr)))__cmpxchg_mb((ptr),                        \
-                                         (unsigned long)(o),           \
-                                         (unsigned long)(n),           \
-                                         sizeof(*(ptr))))
-
-#define cmpxchg_local(ptr,o,n)                                         \
-       ((__typeof__(*(ptr)))__cmpxchg((ptr),                           \
-                                      (unsigned long)(o),              \
-                                      (unsigned long)(n),              \
-                                      sizeof(*(ptr))))
+#include <asm/arm32/cmpxchg.h>
 
 #define local_irq_disable() asm volatile ( "cpsid i @ local_irq_disable\n" : : : "cc" )
 #define local_irq_enable()  asm volatile ( "cpsie i @ local_irq_enable\n" : : : "cc" )
diff --git a/xen/include/asm-arm/arm64/cmpxchg.h b/xen/include/asm-arm/arm64/cmpxchg.h
new file mode 100644
index 0000000..4e930ce
--- /dev/null
+++ b/xen/include/asm-arm/arm64/cmpxchg.h
@@ -0,0 +1,167 @@
+#ifndef __ASM_ARM64_CMPXCHG_H
+#define __ASM_ARM64_CMPXCHG_H
+
+extern void __bad_xchg(volatile void *, int);
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+       unsigned long ret, tmp;
+
+       switch (size) {
+       case 1:
+               asm volatile("//        __xchg1\n"
+               "1:     ldxrb   %w0, %2\n"
+               "       stlxrb  %w1, %w3, %2\n"
+               "       cbnz    %w1, 1b\n"
+                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
+                       : "r" (x)
+                       : "memory");
+               break;
+       case 2:
+               asm volatile("//        __xchg2\n"
+               "1:     ldxrh   %w0, %2\n"
+               "       stlxrh  %w1, %w3, %2\n"
+               "       cbnz    %w1, 1b\n"
+                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
+                       : "r" (x)
+                       : "memory");
+               break;
+       case 4:
+               asm volatile("//        __xchg4\n"
+               "1:     ldxr    %w0, %2\n"
+               "       stlxr   %w1, %w3, %2\n"
+               "       cbnz    %w1, 1b\n"
+                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
+                       : "r" (x)
+                       : "memory");
+               break;
+       case 8:
+               asm volatile("//        __xchg8\n"
+               "1:     ldxr    %0, %2\n"
+               "       stlxr   %w1, %3, %2\n"
+               "       cbnz    %w1, 1b\n"
+                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
+                       : "r" (x)
+                       : "memory");
+               break;
+       default:
+               __bad_xchg(ptr, size), ret = 0;
+               break;
+       }
+
+       smp_mb();
+       return ret;
+}
+
+#define xchg(ptr,x) \
+       ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+                                     unsigned long new, int size)
+{
+       unsigned long oldval = 0, res;
+
+       switch (size) {
+       case 1:
+               do {
+                       asm volatile("// __cmpxchg1\n"
+                       "       ldxrb   %w1, %2\n"
+                       "       mov     %w0, #0\n"
+                       "       cmp     %w1, %w3\n"
+                       "       b.ne    1f\n"
+                       "       stxrb   %w0, %w4, %2\n"
+                       "1:\n"
+                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
+                               : "Ir" (old), "r" (new)
+                               : "cc");
+               } while (res);
+               break;
+
+       case 2:
+               do {
+                       asm volatile("// __cmpxchg2\n"
+                       "       ldxrh   %w1, %2\n"
+                       "       mov     %w0, #0\n"
+                       "       cmp     %w1, %w3\n"
+                       "       b.ne    1f\n"
+                       "       stxrh   %w0, %w4, %2\n"
+                       "1:\n"
+                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
+                               : "Ir" (old), "r" (new)
+                               : "cc");
+               } while (res);
+               break;
+
+       case 4:
+               do {
+                       asm volatile("// __cmpxchg4\n"
+                       "       ldxr    %w1, %2\n"
+                       "       mov     %w0, #0\n"
+                       "       cmp     %w1, %w3\n"
+                       "       b.ne    1f\n"
+                       "       stxr    %w0, %w4, %2\n"
+                       "1:\n"
+                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
+                               : "Ir" (old), "r" (new)
+                               : "cc");
+               } while (res);
+               break;
+
+       case 8:
+               do {
+                       asm volatile("// __cmpxchg8\n"
+                       "       ldxr    %1, %2\n"
+                       "       mov     %w0, #0\n"
+                       "       cmp     %1, %3\n"
+                       "       b.ne    1f\n"
+                       "       stxr    %w0, %4, %2\n"
+                       "1:\n"
+                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
+                               : "Ir" (old), "r" (new)
+                               : "cc");
+               } while (res);
+               break;
+
+       default:
+               __bad_cmpxchg(ptr, size);
+               oldval = 0;
+       }
+
+       return oldval;
+}
+
+static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+                                        unsigned long new, int size)
+{
+       unsigned long ret;
+
+       smp_mb();
+       ret = __cmpxchg(ptr, old, new, size);
+       smp_mb();
+
+       return ret;
+}
+
+#define cmpxchg(ptr,o,n)                                               \
+       ((__typeof__(*(ptr)))__cmpxchg_mb((ptr),                        \
+                                         (unsigned long)(o),           \
+                                         (unsigned long)(n),           \
+                                         sizeof(*(ptr))))
+
+#define cmpxchg_local(ptr,o,n)                                         \
+       ((__typeof__(*(ptr)))__cmpxchg((ptr),                           \
+                                      (unsigned long)(o),              \
+                                      (unsigned long)(n),              \
+                                      sizeof(*(ptr))))
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 8
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h
index fa50ead..6efced3 100644
--- a/xen/include/asm-arm/arm64/system.h
+++ b/xen/include/asm-arm/arm64/system.h
@@ -2,160 +2,7 @@
 #ifndef __ASM_ARM64_SYSTEM_H
 #define __ASM_ARM64_SYSTEM_H
 
-extern void __bad_xchg(volatile void *, int);
-
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-{
-       unsigned long ret, tmp;
-
-       switch (size) {
-       case 1:
-               asm volatile("//        __xchg1\n"
-               "1:     ldxrb   %w0, %2\n"
-               "       stlxrb  %w1, %w3, %2\n"
-               "       cbnz    %w1, 1b\n"
-                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
-                       : "r" (x)
-                       : "memory");
-               break;
-       case 2:
-               asm volatile("//        __xchg2\n"
-               "1:     ldxrh   %w0, %2\n"
-               "       stlxrh  %w1, %w3, %2\n"
-               "       cbnz    %w1, 1b\n"
-                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
-                       : "r" (x)
-                       : "memory");
-               break;
-       case 4:
-               asm volatile("//        __xchg4\n"
-               "1:     ldxr    %w0, %2\n"
-               "       stlxr   %w1, %w3, %2\n"
-               "       cbnz    %w1, 1b\n"
-                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
-                       : "r" (x)
-                       : "memory");
-               break;
-       case 8:
-               asm volatile("//        __xchg8\n"
-               "1:     ldxr    %0, %2\n"
-               "       stlxr   %w1, %3, %2\n"
-               "       cbnz    %w1, 1b\n"
-                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
-                       : "r" (x)
-                       : "memory");
-               break;
-       default:
-               __bad_xchg(ptr, size), ret = 0;
-               break;
-       }
-
-       smp_mb();
-       return ret;
-}
-
-#define xchg(ptr,x) \
-       ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-extern void __bad_cmpxchg(volatile void *ptr, int size);
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-                                     unsigned long new, int size)
-{
-       unsigned long oldval = 0, res;
-
-       switch (size) {
-       case 1:
-               do {
-                       asm volatile("// __cmpxchg1\n"
-                       "       ldxrb   %w1, %2\n"
-                       "       mov     %w0, #0\n"
-                       "       cmp     %w1, %w3\n"
-                       "       b.ne    1f\n"
-                       "       stxrb   %w0, %w4, %2\n"
-                       "1:\n"
-                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
-                               : "Ir" (old), "r" (new)
-                               : "cc");
-               } while (res);
-               break;
-
-       case 2:
-               do {
-                       asm volatile("// __cmpxchg2\n"
-                       "       ldxrh   %w1, %2\n"
-                       "       mov     %w0, #0\n"
-                       "       cmp     %w1, %w3\n"
-                       "       b.ne    1f\n"
-                       "       stxrh   %w0, %w4, %2\n"
-                       "1:\n"
-                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
-                               : "Ir" (old), "r" (new)
-                               : "cc");
-               } while (res);
-               break;
-
-       case 4:
-               do {
-                       asm volatile("// __cmpxchg4\n"
-                       "       ldxr    %w1, %2\n"
-                       "       mov     %w0, #0\n"
-                       "       cmp     %w1, %w3\n"
-                       "       b.ne    1f\n"
-                       "       stxr    %w0, %w4, %2\n"
-                       "1:\n"
-                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
-                               : "Ir" (old), "r" (new)
-                               : "cc");
-               } while (res);
-               break;
-
-       case 8:
-               do {
-                       asm volatile("// __cmpxchg8\n"
-                       "       ldxr    %1, %2\n"
-                       "       mov     %w0, #0\n"
-                       "       cmp     %1, %3\n"
-                       "       b.ne    1f\n"
-                       "       stxr    %w0, %4, %2\n"
-                       "1:\n"
-                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
-                               : "Ir" (old), "r" (new)
-                               : "cc");
-               } while (res);
-               break;
-
-       default:
-               __bad_cmpxchg(ptr, size);
-               oldval = 0;
-       }
-
-       return oldval;
-}
-
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-                                        unsigned long new, int size)
-{
-       unsigned long ret;
-
-       smp_mb();
-       ret = __cmpxchg(ptr, old, new, size);
-       smp_mb();
-
-       return ret;
-}
-
-#define cmpxchg(ptr,o,n)                                               \
-       ((__typeof__(*(ptr)))__cmpxchg_mb((ptr),                        \
-                                         (unsigned long)(o),           \
-                                         (unsigned long)(n),           \
-                                         sizeof(*(ptr))))
-
-#define cmpxchg_local(ptr,o,n)                                         \
-       ((__typeof__(*(ptr)))__cmpxchg((ptr),                           \
-                                      (unsigned long)(o),              \
-                                      (unsigned long)(n),              \
-                                      sizeof(*(ptr))))
+#include <asm/arm64/cmpxchg.h>
 
 /* Uses uimm4 as a bitmask to select the clearing of one or more of
  * the DAIF exception mask bits:
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
