
[Xen-changelog] [xen master] xen: arm64: reinstate hard tabs in system.h cmpxchg



commit 7ccbe11dddb0174db3b2191603b43a2e284f8fe7
Author:     Ian Campbell <ian.campbell@xxxxxxxxxx>
AuthorDate: Wed Mar 26 13:38:47 2014 +0000
Commit:     Ian Campbell <ian.campbell@xxxxxxxxxx>
CommitDate: Thu Apr 3 17:15:43 2014 +0100

    xen: arm64: reinstate hard tabs in system.h cmpxchg
    
    These functions are from Linux, and the intention was to keep the
    formatting the same to make resyncing easier.
    
    Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
    Acked-by: Julien Grall <julien.grall@xxxxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
---
 xen/include/asm-arm/arm64/system.h |  196 ++++++++++++++++++------------------
 1 file changed, 98 insertions(+), 98 deletions(-)

diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h
index 0db96e0..9fa698b 100644
--- a/xen/include/asm-arm/arm64/system.h
+++ b/xen/include/asm-arm/arm64/system.h
@@ -6,7 +6,7 @@ extern void __bad_xchg(volatile void *, int);
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 {
-        unsigned long ret, tmp;
+       unsigned long ret, tmp;
 
        switch (size) {
        case 1:
@@ -15,8 +15,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
                "       stlxrb  %w1, %w3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
-                        : "r" (x)
-                        : "cc", "memory");
+                       : "r" (x)
+                       : "cc", "memory");
                break;
        case 2:
                asm volatile("//        __xchg2\n"
@@ -24,8 +24,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
                "       stlxrh  %w1, %w3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
-                        : "r" (x)
-                        : "cc", "memory");
+                       : "r" (x)
+                       : "cc", "memory");
                break;
        case 4:
                asm volatile("//        __xchg4\n"
@@ -33,8 +33,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
                "       stlxr   %w1, %w3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
-                        : "r" (x)
-                        : "cc", "memory");
+                       : "r" (x)
+                       : "cc", "memory");
                break;
        case 8:
                asm volatile("//        __xchg8\n"
@@ -42,12 +42,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
                "       stlxr   %w1, %3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
-                        : "r" (x)
-                        : "cc", "memory");
-                break;
-        default:
-                __bad_xchg(ptr, size), ret = 0;
-                break;
+                       : "r" (x)
+                       : "cc", "memory");
+               break;
+       default:
+               __bad_xchg(ptr, size), ret = 0;
+               break;
        }
 
        smp_mb();
@@ -55,107 +55,107 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 }
 
 #define xchg(ptr,x) \
-        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+       ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 extern void __bad_cmpxchg(volatile void *ptr, int size);
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-                                      unsigned long new, int size)
+                                     unsigned long new, int size)
 {
-        unsigned long oldval = 0, res;
-
-        switch (size) {
-        case 1:
-                do {
-                        asm volatile("// __cmpxchg1\n"
-                        "       ldxrb   %w1, %2\n"
-                        "       mov     %w0, #0\n"
-                        "       cmp     %w1, %w3\n"
-                        "       b.ne    1f\n"
-                        "       stxrb   %w0, %w4, %2\n"
-                        "1:\n"
-                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
-                                : "Ir" (old), "r" (new)
-                                : "cc");
-                } while (res);
-                break;
-
-        case 2:
-                do {
-                        asm volatile("// __cmpxchg2\n"
-                        "       ldxrh   %w1, %2\n"
-                        "       mov     %w0, #0\n"
-                        "       cmp     %w1, %w3\n"
-                        "       b.ne    1f\n"
-                        "       stxrh   %w0, %w4, %2\n"
-                        "1:\n"
-                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
-                                : "Ir" (old), "r" (new)
-                                : "cc");
-                } while (res);
-                break;
-
-        case 4:
-                do {
-                        asm volatile("// __cmpxchg4\n"
-                        "       ldxr    %w1, %2\n"
-                        "       mov     %w0, #0\n"
-                        "       cmp     %w1, %w3\n"
-                        "       b.ne    1f\n"
-                        "       stxr    %w0, %w4, %2\n"
-                        "1:\n"
-                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
-                                : "Ir" (old), "r" (new)
-                                : "cc");
-                } while (res);
-                break;
-
-        case 8:
-                do {
-                        asm volatile("// __cmpxchg8\n"
-                        "       ldxr    %1, %2\n"
-                        "       mov     %w0, #0\n"
-                        "       cmp     %1, %3\n"
-                        "       b.ne    1f\n"
-                        "       stxr    %w0, %4, %2\n"
-                        "1:\n"
-                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
-                                : "Ir" (old), "r" (new)
-                                : "cc");
-                } while (res);
-                break;
-
-        default:
+       unsigned long oldval = 0, res;
+
+       switch (size) {
+       case 1:
+               do {
+                       asm volatile("// __cmpxchg1\n"
+                       "       ldxrb   %w1, %2\n"
+                       "       mov     %w0, #0\n"
+                       "       cmp     %w1, %w3\n"
+                       "       b.ne    1f\n"
+                       "       stxrb   %w0, %w4, %2\n"
+                       "1:\n"
+                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
+                               : "Ir" (old), "r" (new)
+                               : "cc");
+               } while (res);
+               break;
+
+       case 2:
+               do {
+                       asm volatile("// __cmpxchg2\n"
+                       "       ldxrh   %w1, %2\n"
+                       "       mov     %w0, #0\n"
+                       "       cmp     %w1, %w3\n"
+                       "       b.ne    1f\n"
+                       "       stxrh   %w0, %w4, %2\n"
+                       "1:\n"
+                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
+                               : "Ir" (old), "r" (new)
+                               : "cc");
+               } while (res);
+               break;
+
+       case 4:
+               do {
+                       asm volatile("// __cmpxchg4\n"
+                       "       ldxr    %w1, %2\n"
+                       "       mov     %w0, #0\n"
+                       "       cmp     %w1, %w3\n"
+                       "       b.ne    1f\n"
+                       "       stxr    %w0, %w4, %2\n"
+                       "1:\n"
+                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
+                               : "Ir" (old), "r" (new)
+                               : "cc");
+               } while (res);
+               break;
+
+       case 8:
+               do {
+                       asm volatile("// __cmpxchg8\n"
+                       "       ldxr    %1, %2\n"
+                       "       mov     %w0, #0\n"
+                       "       cmp     %1, %3\n"
+                       "       b.ne    1f\n"
+                       "       stxr    %w0, %4, %2\n"
+                       "1:\n"
+                               : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
+                               : "Ir" (old), "r" (new)
+                               : "cc");
+               } while (res);
+               break;
+
+       default:
                __bad_cmpxchg(ptr, size);
                oldval = 0;
-        }
+       }
 
-        return oldval;
+       return oldval;
 }
 
 static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-                                         unsigned long new, int size)
+                                        unsigned long new, int size)
 {
-        unsigned long ret;
+       unsigned long ret;
 
-        smp_mb();
-        ret = __cmpxchg(ptr, old, new, size);
-        smp_mb();
+       smp_mb();
+       ret = __cmpxchg(ptr, old, new, size);
+       smp_mb();
 
-        return ret;
+       return ret;
 }
 
-#define cmpxchg(ptr,o,n)                                                \
-        ((__typeof__(*(ptr)))__cmpxchg_mb((ptr),                        \
-                                          (unsigned long)(o),           \
-                                          (unsigned long)(n),           \
-                                          sizeof(*(ptr))))
-
-#define cmpxchg_local(ptr,o,n)                                          \
-        ((__typeof__(*(ptr)))__cmpxchg((ptr),                           \
-                                       (unsigned long)(o),              \
-                                       (unsigned long)(n),              \
-                                       sizeof(*(ptr))))
+#define cmpxchg(ptr,o,n)                                               \
+       ((__typeof__(*(ptr)))__cmpxchg_mb((ptr),                        \
+                                         (unsigned long)(o),           \
+                                         (unsigned long)(n),           \
+                                         sizeof(*(ptr))))
+
+#define cmpxchg_local(ptr,o,n)                                         \
+       ((__typeof__(*(ptr)))__cmpxchg((ptr),                           \
+                                      (unsigned long)(o),              \
+                                      (unsigned long)(n),              \
+                                      sizeof(*(ptr))))
 
 /* Uses uimm4 as a bitmask to select the clearing of one or more of
  * the DAIF exception mask bits:
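
As context, the functions touched above are Xen's arm64 atomic exchange
and compare-and-swap primitives. Below is a minimal usage sketch, assuming
only this header and the tree's u32 type; the helper names
atomic_inc_return_u32() and test_and_set_flag() are illustrative and not
part of this patch or the header:

    /* Illustrative only -- these helpers are not part of the patch. */
    static inline u32 atomic_inc_return_u32(volatile u32 *counter)
    {
            u32 old;

            /* cmpxchg() returns the value found in *counter; the store
             * took effect only if that equals the 'old' we passed in,
             * so retry until no other CPU raced with the update. */
            do {
                    old = *counter;
            } while (cmpxchg(counter, old, old + 1) != old);

            return old + 1;
    }

    static inline int test_and_set_flag(volatile u32 *flag)
    {
            /* xchg() unconditionally swaps in the new value and returns
             * the previous contents; the barriers are supplied by the
             * implementation above. */
            return xchg(flag, 1) != 0;
    }

Note that cmpxchg_local() is the same operation as cmpxchg() but without
the surrounding smp_mb() barriers, for callers that do not need to order
the exchange against other memory accesses.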
--
generated by git-patchbot for /home/xen/git/xen.git#master
