[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-4.0-testing] x86: Define atomic_{read, write}{8, 16, 32, 64} accessor functions.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1292530232 0
# Node ID d11341841f449d4ec51ebcf720d28bab957ccdfe
# Parent  a453885067908f8a092b5d7f7d9ad07c8db3be9c
x86: Define atomic_{read,write}{8,16,32,64} accessor functions.

These absolutely guarantee to read/write a uint*_t with a single atomic
processor instruction.

Also re-define atomic_read/atomic_write (act on atomic_t) similarly.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
xen-unstable changeset:   22564:aa33ab320f7e
xen-unstable date:        Thu Dec 16 19:29:08 2010 +0000
---
 xen/include/asm-x86/atomic.h        |   44 ++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/x86_32/system.h |    7 -----
 xen/include/asm-x86/x86_64/system.h |    5 ----
 3 files changed, 42 insertions(+), 14 deletions(-)

diff -r a45388506790 -r d11341841f44 xen/include/asm-x86/atomic.h
--- a/xen/include/asm-x86/atomic.h      Thu Dec 16 15:40:51 2010 +0000
+++ b/xen/include/asm-x86/atomic.h      Thu Dec 16 20:10:32 2010 +0000
@@ -10,6 +10,46 @@
 #define LOCK ""
 #endif
 
+#define build_atomic_read(name, size, type, reg, barrier) \
+static inline type name(const volatile type *addr) \
+{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
+:"m" (*(volatile type *)addr) barrier); return ret; }
+
+#define build_atomic_write(name, size, type, reg, barrier) \
+static inline void name(volatile type *addr, type val) \
+{ asm volatile("mov" size " %0,%1": :reg (val), \
+"m" (*(volatile type *)addr) barrier); }
+
+build_atomic_read(atomic_read8, "b", uint8_t, "=q", )
+build_atomic_read(atomic_read16, "w", uint16_t, "=r", )
+build_atomic_read(atomic_read32, "l", uint32_t, "=r", )
+build_atomic_read(atomic_read_int, "l", int, "=r", )
+
+build_atomic_write(atomic_write8, "b", uint8_t, "q", )
+build_atomic_write(atomic_write16, "w", uint16_t, "r", )
+build_atomic_write(atomic_write32, "l", uint32_t, "r", )
+build_atomic_write(atomic_write_int, "l", int, "r", )
+
+#ifdef __x86_64__
+build_atomic_read(atomic_read64, "q", uint64_t, "=r", )
+build_atomic_write(atomic_write64, "q", uint64_t, "r", )
+#else
+static inline uint64_t atomic_read64(const volatile uint64_t *addr)
+{
+    uint64_t *__addr = (uint64_t *)addr;
+    return __cmpxchg8b(__addr, 0, 0);
+}
+static inline void atomic_write64(volatile uint64_t *addr, uint64_t val)
+{
+    uint64_t old = *addr, new, *__addr = (uint64_t *)addr;
+    while ( (new = __cmpxchg8b(__addr, old, val)) != old )
+        old = new;
+}
+#endif
+
+#undef build_atomic_read
+#undef build_atomic_write
+
 /*
  * NB. I've pushed the volatile qualifier into the operations. This allows
  * fast accessors such as _atomic_read() and _atomic_set() which don't give
@@ -26,7 +66,7 @@ typedef struct { int counter; } atomic_t
  * Atomically reads the value of @v.
  */
 #define _atomic_read(v)                ((v).counter)
-#define atomic_read(v)         (*(volatile int *)&((v)->counter))
+#define atomic_read(v)         atomic_read_int(&((v)->counter))
 
 /**
  * atomic_set - set atomic variable
@@ -36,7 +76,7 @@ typedef struct { int counter; } atomic_t
  * Atomically sets the value of @v to @i.
  */ 
 #define _atomic_set(v,i)       (((v).counter) = (i))
-#define atomic_set(v,i)                (*(volatile int *)&((v)->counter) = (i))
+#define atomic_set(v,i)                atomic_write_int(&((v)->counter), (i))
 
 /**
  * atomic_add - add integer to atomic variable
diff -r a45388506790 -r d11341841f44 xen/include/asm-x86/x86_32/system.h
--- a/xen/include/asm-x86/x86_32/system.h       Thu Dec 16 15:40:51 2010 +0000
+++ b/xen/include/asm-x86/x86_32/system.h       Thu Dec 16 20:10:32 2010 +0000
@@ -91,13 +91,6 @@ static always_inline unsigned long long 
     _rc;                                                                \
 })
 
-static inline void atomic_write64(uint64_t *p, uint64_t v)
-{
-    uint64_t w = *p, x;
-    while ( (x = __cmpxchg8b(p, w, v)) != w )
-        w = x;
-}
-
 #define mb()                    \
     asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
 
diff -r a45388506790 -r d11341841f44 xen/include/asm-x86/x86_64/system.h
--- a/xen/include/asm-x86/x86_64/system.h       Thu Dec 16 15:40:51 2010 +0000
+++ b/xen/include/asm-x86/x86_64/system.h       Thu Dec 16 20:10:32 2010 +0000
@@ -47,11 +47,6 @@
     _rc;                                                                \
 })
 
-static inline void atomic_write64(uint64_t *p, uint64_t v)
-{
-    *p = v;
-}
-
 #define mb()                    \
     asm volatile ( "mfence" : : : "memory" )
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.