[Xen-devel] [PATCHv2 4/6] x86: provide xadd()
xadd() atomically adds a value to a memory location and returns the
location's previous value. This is needed to implement ticket locks.
Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
---
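For context, a ticket lock hands out tickets with a fetch-and-add on the
lock's tail and spins until the head catches up, which is why a primitive
that returns the previous value is required. Below is a minimal userspace
sketch of that idea, assuming GCC on x86-64; the struct ticket_lock type
and the __atomic_fetch_add() stand-in for xadd() are illustrative only,
not part of this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical ticket lock: 'tail' is the next ticket to hand out,
     * 'head' is the ticket currently being served. */
    struct ticket_lock {
        uint16_t head;
        uint16_t tail;
    };

    static void ticket_lock_acquire(struct ticket_lock *l)
    {
        /* Take a ticket; __atomic_fetch_add() returns the *previous*
         * value, which is exactly the semantic xadd() provides. */
        uint16_t me = __atomic_fetch_add(&l->tail, 1, __ATOMIC_ACQUIRE);

        /* Spin until our ticket is being served. */
        while ( __atomic_load_n(&l->head, __ATOMIC_ACQUIRE) != me )
            __builtin_ia32_pause();
    }

    static void ticket_lock_release(struct ticket_lock *l)
    {
        /* Serve the next waiter. */
        __atomic_fetch_add(&l->head, 1, __ATOMIC_RELEASE);
    }

    int main(void)
    {
        struct ticket_lock l = { 0, 0 };

        ticket_lock_acquire(&l);
        printf("locked:   head=%u tail=%u\n",
               (unsigned)l.head, (unsigned)l.tail);
        ticket_lock_release(&l);
        printf("unlocked: head=%u tail=%u\n",
               (unsigned)l.head, (unsigned)l.tail);
        return 0;
    }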
xen/include/asm-x86/system.h | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 7111329..1e6c6a8 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -117,6 +117,35 @@ static always_inline unsigned long __cmpxchg(
                                    (unsigned long)__n,sizeof(*(ptr)))); \
 })
 
+static always_inline unsigned long __xadd(
+    volatile void *ptr, unsigned long v, int size)
+{
+    switch ( size )
+    {
+    case 1:
+        asm volatile ( "lock; xaddb %b0,%1"
+                       : "+r" (v), "+m" (*__xg((volatile void *)ptr)));
+        return v;
+    case 2:
+        asm volatile ( "lock; xaddw %w0,%1"
+                       : "+r" (v), "+m" (*__xg((volatile void *)ptr)));
+        return v;
+    case 4:
+        asm volatile ( "lock; xaddl %k0,%1"
+                       : "+r" (v), "+m" (*__xg((volatile void *)ptr)));
+        return v;
+    case 8:
+        asm volatile ( "lock; xaddq %q0,%1"
+                       : "+r" (v), "+m" (*__xg((volatile void *)ptr)));
+        return v;
+    }
+    return 0;
+}
+
+#define xadd(ptr, v) ({                                        \
+    __xadd((ptr), (unsigned long)(v), sizeof(*(ptr)));         \
+})
+
 /*
  * Both Intel and AMD agree that, from a programmer's viewpoint:
  *  Loads cannot be reordered relative to other loads.
--
1.7.10.4
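
Like __cmpxchg() just above it, the helper dispatches on sizeof(*(ptr)),
so the single xadd() name covers 1-, 2-, 4- and 8-byte operands. A
caller's-eye sketch of the semantics (variable names are illustrative,
not taken from this series):

    uint32_t ticket = 0;
    uint32_t mine;

    mine = xadd(&ticket, 1);   /* mine == 0, ticket == 1 */
    mine = xadd(&ticket, 1);   /* mine == 1, ticket == 2 */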