
[Xen-changelog] [xen staging] atomic: add atomic_and operations



commit 75b7411617108bdc820d337064b38ac09f794fc5
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Wed Feb 26 10:51:31 2020 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Feb 26 10:51:31 2020 +0100

    atomic: add atomic_and operations
    
    To x86 and Arm. This performs an atomic AND operation against an
    atomic_t variable with the provided mask.
    
    Requested-by: Jan Beulich <jbeulich@xxxxxxxx>
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Julien Grall <julien@xxxxxxx>
---
 xen/include/asm-arm/arm32/atomic.h | 17 +++++++++++++++++
 xen/include/asm-arm/arm64/atomic.h | 14 ++++++++++++++
 xen/include/asm-x86/atomic.h       |  8 ++++++++
 3 files changed, 39 insertions(+)
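
For context: atomic_and() is typically used to clear flag bits in an
atomic_t without a read-modify-write race against other CPUs. A minimal
usage sketch (the flags variable and FLAG_* constants are hypothetical,
for illustration only; ATOMIC_INIT() is Xen's static initializer):

    /* Hypothetical flag bits, not part of this patch. */
    #define FLAG_PENDING  (1 << 0)
    #define FLAG_ACTIVE   (1 << 1)

    static atomic_t flags = ATOMIC_INIT(FLAG_PENDING | FLAG_ACTIVE);

    static void clear_pending(void)
    {
        /* Atomically clear FLAG_PENDING while preserving all other bits. */
        atomic_and(~FLAG_PENDING, &flags);
    }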

diff --git a/xen/include/asm-arm/arm32/atomic.h b/xen/include/asm-arm/arm32/atomic.h
index c03eb684cd..2832a72792 100644
--- a/xen/include/asm-arm/arm32/atomic.h
+++ b/xen/include/asm-arm/arm32/atomic.h
@@ -96,6 +96,23 @@ static inline int atomic_sub_return(int i, atomic_t *v)
        return result;
 }
 
+static inline void atomic_and(int m, atomic_t *v)
+{
+       unsigned long tmp;
+       int result;
+
+       prefetchw(&v->counter);
+       __asm__ __volatile__("@ atomic_and\n"
+"1:    ldrex   %0, [%3]\n"
+"      and     %0, %0, %4\n"
+"      strex   %1, %0, [%3]\n"
+"      teq     %1, #0\n"
+"      bne     1b"
+       : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+       : "r" (&v->counter), "Ir" (m)
+       : "cc");
+}
+
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
        int oldval;
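
The arm32 implementation above is an exclusive-load/store retry loop:
LDREX loads v->counter and tags the location for exclusive access, AND
applies the mask, STREX attempts the store and writes 0 to tmp only if
the exclusive tag is still held, and TEQ/BNE retries the whole sequence
otherwise. A C-level sketch of the same retry semantics, using the GCC
__atomic builtins purely as a stand-in (illustrative, not how Xen
implements it):

    static inline void atomic_and_sketch(int m, int *counter)
    {
        int old, new;

        do {
            /* Read the current value (plays the role of LDREX). */
            old = __atomic_load_n(counter, __ATOMIC_RELAXED);
            new = old & m;
            /* Try to publish; this fails if another CPU intervened,
             * like a failing STREX, in which case the loop retries. */
        } while ( !__atomic_compare_exchange_n(counter, &old, new,
                                               true /* weak */,
                                               __ATOMIC_RELAXED,
                                               __ATOMIC_RELAXED) );
    }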
diff --git a/xen/include/asm-arm/arm64/atomic.h b/xen/include/asm-arm/arm64/atomic.h
index bce38d4ca2..2d42567866 100644
--- a/xen/include/asm-arm/arm64/atomic.h
+++ b/xen/include/asm-arm/arm64/atomic.h
@@ -91,6 +91,20 @@ static inline int atomic_sub_return(int i, atomic_t *v)
        return result;
 }
 
+static inline void atomic_and(int m, atomic_t *v)
+{
+       unsigned long tmp;
+       int result;
+
+       asm volatile("// atomic_and\n"
+"1:    ldxr    %w0, %2\n"
+"      and     %w0, %w0, %w3\n"
+"      stxr    %w1, %w0, %2\n"
+"      cbnz    %w1, 1b"
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+       : "Ir" (m));
+}
+
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
        unsigned long tmp;
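
The arm64 version follows the same pattern with the AArch64 exclusive
pair LDXR/STXR; a single CBNZ replaces the TEQ/BNE sequence, and the %w
operand modifiers select the 32-bit views of the registers, since
atomic_t wraps an int. As a reference model for the intended semantics
(an assumption for illustration, not part of the patch), the whole loop
behaves like one relaxed fetch-and-AND:

    static inline void atomic_and_model(int m, int *counter)
    {
        /* Single atomic RMW with the same net effect as the LDXR/STXR
         * loop: *counter &= m, with no ordering guarantees implied. */
        (void)__atomic_fetch_and(counter, m, __ATOMIC_RELAXED);
    }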
diff --git a/xen/include/asm-x86/atomic.h b/xen/include/asm-x86/atomic.h
index 682bcf91b1..6b40f9c9f8 100644
--- a/xen/include/asm-x86/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -224,6 +224,14 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
     return c;
 }
 
+static inline void atomic_and(int m, atomic_t *v)
+{
+    asm volatile (
+        "lock andl %1, %0"
+        : "+m" (*(volatile int *)&v->counter)
+        : "ir" (m) );
+}
+
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #endif /* __ARCH_X86_ATOMIC__ */
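
On x86 no retry loop is needed: the LOCK prefix makes the whole
read-modify-write of ANDL atomic with respect to other processors. As
with the Arm versions, atomic_and() returns void and does not hand back
the old value. A standalone pthreads sketch of concurrent use (plain
host code with hypothetical names, not Xen code), showing that two
threads clearing different bits cannot lose each other's update:

    #include <pthread.h>
    #include <stdio.h>

    static int counter = 0xff; /* stands in for atomic_t's counter field */

    static void *clear_bit(void *arg)
    {
        int mask = ~(1 << (long)arg);

        /* Equivalent of atomic_and(mask, &v): one atomic RMW, so
         * concurrent callers cannot tear each other's updates. */
        __atomic_fetch_and(&counter, mask, __ATOMIC_RELAXED);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;

        pthread_create(&t1, NULL, clear_bit, (void *)0L);
        pthread_create(&t2, NULL, clear_bit, (void *)1L);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        /* Bits 0 and 1 are both reliably cleared: 0xff -> 0xfc. */
        printf("counter = %#x\n", counter);
        return 0;
    }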
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

