[Xen-devel] [PATCH] xen/arm64: resync atomics and spinlock asm with Linux



This picks up the changes from Linux commit 3a0310eb369a:
    arm64: atomics: fix grossly inconsistent asm constraints for exclusives

    Our uses of inline asm constraints for atomic operations are fairly
    wild and varied. We basically need to guarantee the following:

      1. Any instructions with barrier implications
         (load-acquire/store-release) have a "memory" clobber

      2. When performing exclusive accesses, the addressing mode is generated
         using the "Q" constraint

      3. Atomic blocks which use the condition flags have a "cc" clobber

    This patch addresses these concerns which, as well as fixing the
    semantics of the code, stops GCC complaining about impossible asm
    constraints.

    Signed-off-by: Will Deacon <will.deacon@xxxxxxx>
    Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 xen/include/asm-arm/arm64/atomic.h   |   66 +++++++++++++++---------------
 xen/include/asm-arm/arm64/spinlock.h |   48 +++++++++++-----------
 xen/include/asm-arm/arm64/system.h   |   74 +++++++++++++++++-----------------
 3 files changed, 94 insertions(+), 94 deletions(-)
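
As an editorial illustration (not part of the patch), here is roughly what
atomic_add looks like once the patch below is applied, mirroring the new
xen/include/asm-arm/arm64/atomic.h and showing the three rules from the
quoted commit message in one place: the exclusive access is addressed via
the "+Q" operand instead of a separate pointer passed in "r", the "cc"
clobber is retained as in the patch, and no "memory" clobber is needed here
because plain ldxr/stxr have no barrier semantics.

    /* Sketch of the post-patch form of atomic_add. */
    static inline void atomic_add(int i, atomic_t *v)
    {
            unsigned long tmp;
            int result;

            asm volatile("// atomic_add\n"
    "1:     ldxr    %w0, %2\n"
    "       add     %w0, %w0, %w3\n"
    "       stxr    %w1, %w0, %2\n"
    "       cbnz    %w1, 1b"
            : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
            : "Ir" (i)
            : "cc");
    }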

diff --git a/xen/include/asm-arm/arm64/atomic.h b/xen/include/asm-arm/arm64/atomic.h
index 5e4ffed..a279755 100644
--- a/xen/include/asm-arm/arm64/atomic.h
+++ b/xen/include/asm-arm/arm64/atomic.h
@@ -33,12 +33,12 @@ static inline void atomic_add(int i, atomic_t *v)
        int result;
 
        asm volatile("// atomic_add\n"
-"1:    ldxr    %w0, [%3]\n"
-"      add     %w0, %w0, %w4\n"
-"      stxr    %w1, %w0, [%3]\n"
+"1:    ldxr    %w0, %2\n"
+"      add     %w0, %w0, %w3\n"
+"      stxr    %w1, %w0, %2\n"
 "      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-       : "r" (&v->counter), "Ir" (i)
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+       : "Ir" (i)
        : "cc");
 }
 
@@ -48,13 +48,13 @@ static inline int atomic_add_return(int i, atomic_t *v)
        int result;
 
        asm volatile("// atomic_add_return\n"
-"1:    ldaxr   %w0, [%3]\n"
-"      add     %w0, %w0, %w4\n"
-"      stlxr   %w1, %w0, [%3]\n"
+"1:    ldaxr   %w0, %2\n"
+"      add     %w0, %w0, %w3\n"
+"      stlxr   %w1, %w0, %2\n"
 "      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-       : "r" (&v->counter), "Ir" (i)
-       : "cc");
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+       : "Ir" (i)
+       : "cc", "memory");
 
        return result;
 }
@@ -65,12 +65,12 @@ static inline void atomic_sub(int i, atomic_t *v)
        int result;
 
        asm volatile("// atomic_sub\n"
-"1:    ldxr    %w0, [%3]\n"
-"      sub     %w0, %w0, %w4\n"
-"      stxr    %w1, %w0, [%3]\n"
+"1:    ldxr    %w0, %2\n"
+"      sub     %w0, %w0, %w3\n"
+"      stxr    %w1, %w0, %2\n"
 "      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-       : "r" (&v->counter), "Ir" (i)
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+       : "Ir" (i)
        : "cc");
 }
 
@@ -80,13 +80,13 @@ static inline int atomic_sub_return(int i, atomic_t *v)
        int result;
 
        asm volatile("// atomic_sub_return\n"
-"1:    ldaxr   %w0, [%3]\n"
-"      sub     %w0, %w0, %w4\n"
-"      stlxr   %w1, %w0, [%3]\n"
+"1:    ldaxr   %w0, %2\n"
+"      sub     %w0, %w0, %w3\n"
+"      stlxr   %w1, %w0, %2\n"
 "      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-       : "r" (&v->counter), "Ir" (i)
-       : "cc");
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+       : "Ir" (i)
+       : "cc", "memory");
 
        return result;
 }
@@ -97,15 +97,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
        int oldval;
 
        asm volatile("// atomic_cmpxchg\n"
-"1:    ldaxr   %w1, [%3]\n"
-"      cmp     %w1, %w4\n"
+"1:    ldaxr   %w1, %2\n"
+"      cmp     %w1, %w3\n"
 "      b.ne    2f\n"
-"      stlxr   %w0, %w5, [%3]\n"
+"      stlxr   %w0, %w4, %2\n"
 "      cbnz    %w0, 1b\n"
 "2:"
-       : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
-       : "r" (&ptr->counter), "Ir" (old), "r" (new)
-       : "cc");
+       : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
+       : "Ir" (old), "r" (new)
+       : "cc", "memory");
 
        return oldval;
 }
@@ -115,12 +115,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
        unsigned long tmp, tmp2;
 
        asm volatile("// atomic_clear_mask\n"
-"1:    ldxr    %0, [%3]\n"
-"      bic     %0, %0, %4\n"
-"      stxr    %w1, %0, [%3]\n"
+"1:    ldxr    %0, %2\n"
+"      bic     %0, %0, %3\n"
+"      stxr    %w1, %0, %2\n"
 "      cbnz    %w1, 1b"
-       : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
-       : "r" (addr), "Ir" (mask)
+       : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
+       : "Ir" (mask)
        : "cc");
 }
 
diff --git a/xen/include/asm-arm/arm64/spinlock.h b/xen/include/asm-arm/arm64/spinlock.h
index fe4c403..717f2fe 100644
--- a/xen/include/asm-arm/arm64/spinlock.h
+++ b/xen/include/asm-arm/arm64/spinlock.h
@@ -31,8 +31,8 @@ static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
     ASSERT(_raw_spin_is_locked(lock));
 
     asm volatile(
-        "       stlr    %w1, [%0]\n"
-        : : "r" (&lock->lock), "r" (0) : "memory");
+        "       stlr    %w1, %0\n"
+        : "=Q" (lock->lock) : "r" (0) : "memory");
 }
 
 static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
@@ -40,13 +40,13 @@ static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
     unsigned int tmp;
 
     asm volatile(
-        "       ldaxr   %w0, [%1]\n"
+        "       ldaxr   %w0, %1\n"
         "       cbnz    %w0, 1f\n"
-        "       stxr    %w0, %w2, [%1]\n"
+        "       stxr    %w0, %w2, %1\n"
         "1:\n"
-        : "=&r" (tmp)
-        : "r" (&lock->lock), "r" (1)
-        : "memory");
+        : "=&r" (tmp), "+Q" (lock->lock)
+        : "r" (1)
+        : "cc", "memory");
 
     return !tmp;
 }
@@ -62,14 +62,14 @@ static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
     unsigned int tmp, tmp2 = 1;
 
     asm volatile(
-        "       ldaxr   %w0, [%2]\n"
+        "       ldaxr   %w0, %2\n"
         "       add     %w0, %w0, #1\n"
         "       tbnz    %w0, #31, 1f\n"
-        "       stxr    %w1, %w0, [%2]\n"
+        "       stxr    %w1, %w0, %2\n"
         "1:\n"
-        : "=&r" (tmp), "+r" (tmp2)
-        : "r" (&rw->lock)
-        : "memory");
+        : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+        :
+        : "cc", "memory");
 
     return !tmp2;
 }
@@ -79,13 +79,13 @@ static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
     unsigned int tmp;
 
     asm volatile(
-        "       ldaxr   %w0, [%1]\n"
+        "       ldaxr   %w0, %1\n"
         "       cbnz    %w0, 1f\n"
-        "       stxr    %w0, %w2, [%1]\n"
+        "       stxr    %w0, %w2, %1\n"
         "1:\n"
-        : "=&r" (tmp)
-        : "r" (&rw->lock), "r" (0x80000000)
-        : "memory");
+        : "=&r" (tmp), "+Q" (rw->lock)
+        : "r" (0x80000000)
+        : "cc", "memory");
 
     return !tmp;
 }
@@ -95,20 +95,20 @@ static inline void _raw_read_unlock(raw_rwlock_t *rw)
     unsigned int tmp, tmp2;
 
     asm volatile(
-        "1:     ldxr    %w0, [%2]\n"
+        "    1: ldxr    %w0, %2\n"
         "       sub     %w0, %w0, #1\n"
-        "       stlxr   %w1, %w0, [%2]\n"
+        "       stlxr   %w1, %w0, %2\n"
         "       cbnz    %w1, 1b\n"
-        : "=&r" (tmp), "=&r" (tmp2)
-        : "r" (&rw->lock)
-        : "memory");
+        : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
+        :
+        : "cc", "memory");
 }
 
 static inline void _raw_write_unlock(raw_rwlock_t *rw)
 {
     asm volatile(
-        "       stlr    %w1, [%0]\n"
-        : : "r" (&rw->lock), "r" (0) : "memory");
+        "       stlr    %w1, %0\n"
+        : "=Q" (rw->lock) : "r" (0) : "memory");
 }
 
 #define _raw_rw_is_locked(x) ((x)->lock != 0)
diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h
index 4e41913..d7e912f 100644
--- a/xen/include/asm-arm/arm64/system.h
+++ b/xen/include/asm-arm/arm64/system.h
@@ -28,39 +28,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
         switch (size) {
         case 1:
                 asm volatile("//        __xchg1\n"
-                "1:     ldaxrb  %w0, [%3]\n"
-                "       stlxrb  %w1, %w2, [%3]\n"
+                "1:     ldaxrb  %w0, %2\n"
+                "       stlxrb  %w1, %w3, %2\n"
                 "       cbnz    %w1, 1b\n"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
+                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
+                        : "r" (x)
+                        : "cc", "memory");
                 break;
         case 2:
                 asm volatile("//        __xchg2\n"
-                "1:     ldaxrh  %w0, [%3]\n"
-                "       stlxrh  %w1, %w2, [%3]\n"
+                "1:     ldaxrh  %w0, %2\n"
+                "       stlxrh  %w1, %w3, %2\n"
                 "       cbnz    %w1, 1b\n"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
+                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
+                        : "r" (x)
+                        : "cc", "memory");
                 break;
         case 4:
                 asm volatile("//        __xchg4\n"
-                "1:     ldaxr   %w0, [%3]\n"
-                "       stlxr   %w1, %w2, [%3]\n"
+                "1:     ldaxr   %w0, %2\n"
+                "       stlxr   %w1, %w3, %2\n"
                 "       cbnz    %w1, 1b\n"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
+                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
+                        : "r" (x)
+                        : "cc", "memory");
                 break;
         case 8:
                 asm volatile("//        __xchg8\n"
-                "1:     ldaxr   %0, [%3]\n"
-                "       stlxr   %w1, %2, [%3]\n"
+                "1:     ldaxr   %0, %2\n"
+                "       stlxr   %w1, %3, %2\n"
                 "       cbnz    %w1, 1b\n"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
+                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
+                        : "r" (x)
+                        : "cc", "memory");
                 break;
         default:
                 __bad_xchg(ptr, size), ret = 0;
@@ -84,14 +84,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
         case 1:
                 do {
                         asm volatile("// __cmpxchg1\n"
-                        "       ldxrb   %w1, [%2]\n"
+                        "       ldxrb   %w1, %2\n"
                         "       mov     %w0, #0\n"
                         "       cmp     %w1, %w3\n"
                         "       b.ne    1f\n"
-                        "       stxrb   %w0, %w4, [%2]\n"
+                        "       stxrb   %w0, %w4, %2\n"
                         "1:\n"
-                                : "=&r" (res), "=&r" (oldval)
-                                : "r" (ptr), "Ir" (old), "r" (new)
+                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 
*)ptr)
+                                : "Ir" (old), "r" (new)
                                 : "cc");
                 } while (res);
                 break;
@@ -99,29 +99,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
         case 2:
                 do {
                         asm volatile("// __cmpxchg2\n"
-                        "       ldxrh   %w1, [%2]\n"
+                        "       ldxrh   %w1, %2\n"
                         "       mov     %w0, #0\n"
                         "       cmp     %w1, %w3\n"
                         "       b.ne    1f\n"
-                        "       stxrh   %w0, %w4, [%2]\n"
+                        "       stxrh   %w0, %w4, %2\n"
                         "1:\n"
-                                : "=&r" (res), "=&r" (oldval)
-                                : "r" (ptr), "Ir" (old), "r" (new)
-                                : "memory", "cc");
+                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 
*)ptr)
+                                : "Ir" (old), "r" (new)
+                                : "cc");
                 } while (res);
                 break;
 
         case 4:
                 do {
                         asm volatile("// __cmpxchg4\n"
-                        "       ldxr    %w1, [%2]\n"
+                        "       ldxr    %w1, %2\n"
                         "       mov     %w0, #0\n"
                         "       cmp     %w1, %w3\n"
                         "       b.ne    1f\n"
-                        "       stxr    %w0, %w4, [%2]\n"
+                        "       stxr    %w0, %w4, %2\n"
                         "1:\n"
-                                : "=&r" (res), "=&r" (oldval)
-                                : "r" (ptr), "Ir" (old), "r" (new)
+                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 
*)ptr)
+                                : "Ir" (old), "r" (new)
                                 : "cc");
                 } while (res);
                 break;
@@ -129,14 +129,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
         case 8:
                 do {
                         asm volatile("// __cmpxchg8\n"
-                        "       ldxr    %1, [%2]\n"
+                        "       ldxr    %1, %2\n"
                         "       mov     %w0, #0\n"
                         "       cmp     %1, %3\n"
                         "       b.ne    1f\n"
-                        "       stxr    %w0, %4, [%2]\n"
+                        "       stxr    %w0, %4, %2\n"
                         "1:\n"
-                                : "=&r" (res), "=&r" (oldval)
-                                : "r" (ptr), "Ir" (old), "r" (new)
+                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 
*)ptr)
+                                : "Ir" (old), "r" (new)
                                 : "cc");
                 } while (res);
                 break;
-- 
1.7.2.5
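
A hypothetical caller, for illustration only (names example_put and release
are invented, not from the patch): the *_return and cmpxchg primitives are
typically used with ordering assumptions, e.g. a put/free pattern, which is
why the patch gives the load-acquire/store-release variants a "memory"
clobber so the compiler cannot move surrounding accesses across the asm.

    /* Sketch: stores made before the final decrement must not be
     * reordered past it; the stlxr store-release plus the "memory"
     * clobber in atomic_sub_return() provide that guarantee. */
    static inline void example_put(atomic_t *refcnt, void (*release)(void))
    {
        if ( atomic_sub_return(1, refcnt) == 0 )
            release();
    }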

