[Xen-changelog] [xen master] xen: arm32: replace hard tabs in atomics.h



commit 7b92e1becbba9425f3ef0dcb1a32f210f8e57fb7
Author:     Ian Campbell <ian.campbell@xxxxxxxxxx>
AuthorDate: Wed Mar 26 13:38:39 2014 +0000
Commit:     Ian Campbell <ian.campbell@xxxxxxxxxx>
CommitDate: Thu Apr 3 17:15:42 2014 +0100

    xen: arm32: replace hard tabs in atomics.h
    
    This file is from Linux and the intention was to keep the formatting
    the same to make resyncing easier. Put the hardtabs back and adjust
    the emacs magic to reflect the desired use of whitespace.
    
    Adjust the 64-bit emacs magic too.
    
    Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
    Acked-by: Julien Grall <julien.grall@xxxxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
---
 xen/include/asm-arm/arm32/atomic.h |  166 ++++++++++++++++++------------------
 xen/include/asm-arm/arm64/atomic.h |    4 +-
 2 files changed, 85 insertions(+), 85 deletions(-)
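
For readers skimming the patch below: the file being re-indented implements
Xen's arm32 atomics with ARMv7 exclusive-monitor (ldrex/strex) retry loops.
A minimal standalone version of that pattern, equivalent to the atomic_add()
in the first hunk, is sketched here for illustration only -- it is not part
of the patch, and it assumes an ARMv7 GCC toolchain (e.g.
arm-linux-gnueabihf-gcc):

/* Illustration only, not part of the patch below.  A self-contained
 * copy of the ldrex/strex read-modify-write loop used throughout
 * xen/include/asm-arm/arm32/atomic.h.  ARMv7 GCC inline asm. */
static inline void example_atomic_add(int i, volatile int *counter)
{
	unsigned long tmp;	/* strex status: 0 on success */
	int result;		/* scratch holding the new value */

	__asm__ __volatile__("@ example_atomic_add\n"
"1:	ldrex	%0, [%3]\n"	/* load-exclusive the counter        */
"	add	%0, %0, %4\n"	/* compute the updated value         */
"	strex	%1, %0, [%3]\n"	/* try to store it back exclusively  */
"	teq	%1, #0\n"	/* did another CPU break the monitor? */
"	bne	1b"		/* then retry from the load          */
	: "=&r" (result), "=&r" (tmp), "+Qo" (*counter)
	: "r" (counter), "Ir" (i)
	: "cc");
}

strex writes 0 to its status operand only if the exclusive reservation taken
by the matching ldrex is still held; any intervening write to the location by
another CPU makes it fail, and the teq/bne pair retries the whole
read-modify-write, which is what makes the sequence atomic without a lock.
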

diff --git a/xen/include/asm-arm/arm32/atomic.h b/xen/include/asm-arm/arm32/atomic.h
index 523c745..3f024d4 100644
--- a/xen/include/asm-arm/arm32/atomic.h
+++ b/xen/include/asm-arm/arm32/atomic.h
@@ -18,122 +18,122 @@
  */
 static inline void atomic_add(int i, atomic_t *v)
 {
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__("@ atomic_add\n"
-"1:     ldrex   %0, [%3]\n"
-"       add     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
+       unsigned long tmp;
+       int result;
+
+       __asm__ __volatile__("@ atomic_add\n"
+"1:    ldrex   %0, [%3]\n"
+"      add     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-        unsigned long tmp;
-        int result;
+       unsigned long tmp;
+       int result;
 
-        smp_mb();
+       smp_mb();
 
-        __asm__ __volatile__("@ atomic_add_return\n"
-"1:     ldrex   %0, [%3]\n"
-"       add     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
+       __asm__ __volatile__("@ atomic_add_return\n"
+"1:    ldrex   %0, [%3]\n"
+"      add     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
 
-        smp_mb();
+       smp_mb();
 
-        return result;
+       return result;
 }
 
 static inline void atomic_sub(int i, atomic_t *v)
 {
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__("@ atomic_sub\n"
-"1:     ldrex   %0, [%3]\n"
-"       sub     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
+       unsigned long tmp;
+       int result;
+
+       __asm__ __volatile__("@ atomic_sub\n"
+"1:    ldrex   %0, [%3]\n"
+"      sub     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
-        unsigned long tmp;
-        int result;
+       unsigned long tmp;
+       int result;
 
-        smp_mb();
+       smp_mb();
 
-        __asm__ __volatile__("@ atomic_sub_return\n"
-"1:     ldrex   %0, [%3]\n"
-"       sub     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
+       __asm__ __volatile__("@ atomic_sub_return\n"
+"1:    ldrex   %0, [%3]\n"
+"      sub     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
 
-        smp_mb();
+       smp_mb();
 
-        return result;
+       return result;
 }
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
-        unsigned long oldval, res;
+       unsigned long oldval, res;
 
-        smp_mb();
+       smp_mb();
 
-        do {
-                __asm__ __volatile__("@ atomic_cmpxchg\n"
-                "ldrex  %1, [%3]\n"
-                "mov    %0, #0\n"
-                "teq    %1, %4\n"
-                "strexeq %0, %5, [%3]\n"
-                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
-                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
-                    : "cc");
-        } while (res);
+       do {
+               __asm__ __volatile__("@ atomic_cmpxchg\n"
+               "ldrex  %1, [%3]\n"
+               "mov    %0, #0\n"
+               "teq    %1, %4\n"
+               "strexeq %0, %5, [%3]\n"
+                   : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+                   : "r" (&ptr->counter), "Ir" (old), "r" (new)
+                   : "cc");
+       } while (res);
 
-        smp_mb();
+       smp_mb();
 
-        return oldval;
+       return oldval;
 }
 
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
-        unsigned long tmp, tmp2;
-
-        __asm__ __volatile__("@ atomic_clear_mask\n"
-"1:     ldrex   %0, [%3]\n"
-"       bic     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-        : "r" (addr), "Ir" (mask)
-        : "cc");
+       unsigned long tmp, tmp2;
+
+       __asm__ __volatile__("@ atomic_clear_mask\n"
+"1:    ldrex   %0, [%3]\n"
+"      bic     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
+       : "r" (addr), "Ir" (mask)
+       : "cc");
 }
 
-#define atomic_inc(v)           atomic_add(1, v)
-#define atomic_dec(v)           atomic_sub(1, v)
+#define atomic_inc(v)          atomic_add(1, v)
+#define atomic_dec(v)          atomic_sub(1, v)
 
-#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
+#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v)    (atomic_add_return(1, v))
 #define atomic_dec_return(v)    (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
@@ -145,7 +145,7 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
  * Local variables:
  * mode: C
  * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
+ * c-basic-offset: 8
+ * indent-tabs-mode: t
  * End:
  */
diff --git a/xen/include/asm-arm/arm64/atomic.h b/xen/include/asm-arm/arm64/atomic.h
index a279755..b04e6d5 100644
--- a/xen/include/asm-arm/arm64/atomic.h
+++ b/xen/include/asm-arm/arm64/atomic.h
@@ -157,7 +157,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
  * Local variables:
  * mode: C
  * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
+ * c-basic-offset: 8
+ * indent-tabs-mode: t
  * End:
  */
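
As context for the arm64 hunk above, whose header shows
__atomic_add_unless(): helpers of that shape are conventionally built on a
cmpxchg primitive like the atomic_cmpxchg() visible in the arm32 file. The
following is a sketch of the usual caller-side loop in the Linux-derived
style, for illustration only -- it assumes the atomic_t, atomic_read() and
atomic_cmpxchg() definitions from xen/include/asm-arm/atomic.h and is not
necessarily Xen's exact implementation:

/* Illustration only.  Adds "a" to *v unless the counter already holds
 * "u"; returns the value observed before the add.  Assumes atomic_t,
 * atomic_read() and atomic_cmpxchg() from asm-arm/atomic.h. */
static inline int example_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;	/* lost a race: retry with the value
				 * another CPU just stored */
	return c;
}

The loop re-reads the counter whenever atomic_cmpxchg() reports a different
old value, so it makes forward progress without locking: it terminates either
when the compare-and-swap succeeds or when the forbidden value u is observed.
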
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog