[Xen-devel] [PATCHv4 1/5] x86: provide add_sized()
add_sized(ptr, inc) adds inc to the value at ptr, using only loads and
stores of the correct size for the type of *ptr.  The add is /not/
atomic.

This is needed for ticket locks to ensure the increment of the head
ticket does not affect the tail ticket.

Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
---
 xen/include/asm-x86/atomic.h |   27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/xen/include/asm-x86/atomic.h b/xen/include/asm-x86/atomic.h
index 52af083..74fa0bc 100644
--- a/xen/include/asm-x86/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -14,6 +14,14 @@ static inline void name(volatile type *addr, type val) \
 { asm volatile("mov" size " %1,%0": "=m" (*(volatile type *)addr) \
   :reg (val) barrier); }
 
+#define build_add_sized(name, size, type, reg) \
+    static inline void name(volatile type *addr, type val) \
+    { \
+        asm volatile("add" size " %1,%0" \
+                     : "=m" (*addr) \
+                     : reg (val)); \
+    }
+
 build_read_atomic(read_u8_atomic, "b", uint8_t, "=q", )
 build_read_atomic(read_u16_atomic, "w", uint16_t, "=r", )
 build_read_atomic(read_u32_atomic, "l", uint32_t, "=r", )
@@ -24,8 +32,14 @@ build_write_atomic(write_u16_atomic, "w", uint16_t, "r", )
 build_write_atomic(write_u32_atomic, "l", uint32_t, "r", )
 build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
 
+build_add_sized(add_u8_sized, "b", uint8_t, "qi")
+build_add_sized(add_u16_sized, "w", uint16_t, "ri")
+build_add_sized(add_u32_sized, "l", uint32_t, "ri")
+build_add_sized(add_u64_sized, "q", uint64_t, "ri")
+
 #undef build_read_atomic
 #undef build_write_atomic
+#undef build_add_sized
 
 void __bad_atomic_size(void);
 
@@ -53,6 +67,19 @@ void __bad_atomic_size(void);
     }                                                     \
 })
 
+#define add_sized(p, x) ({                                \
+    typeof(*(p)) __x = (x);                               \
+    unsigned long x_ = (unsigned long)__x;                \
+    switch ( sizeof(*(p)) )                               \
+    {                                                     \
+    case 1: add_u8_sized((uint8_t *)(p), x_); break;      \
+    case 2: add_u16_sized((uint16_t *)(p), x_); break;    \
+    case 4: add_u32_sized((uint32_t *)(p), x_); break;    \
+    case 8: add_u64_sized((uint64_t *)(p), x_); break;    \
+    default: __bad_atomic_size(); break;                  \
+    }                                                     \
+})
+
 /*
  * NB. I've pushed the volatile qualifier into the operations. This allows
  * fast accessors such as _atomic_read() and _atomic_set() which don't give
-- 
1.7.10.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
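As a concrete illustration of the size dispatch, each call below should
compile to a single add instruction of the matching width.  This is a
sketch, not part of the patch; the function name is made up, and it
assumes the patched asm-x86/atomic.h is the <asm/atomic.h> in scope:

#include <stdint.h>
#include <asm/atomic.h>   /* patched header providing add_sized() */

void add_sized_examples(void)
{
    static volatile uint8_t  u8;
    static volatile uint16_t u16;
    static volatile uint32_t u32;
    static volatile uint64_t u64;

    add_sized(&u8,  1);   /* case 1: add_u8_sized()  -> single addb */
    add_sized(&u16, 1);   /* case 2: add_u16_sized() -> single addw */
    add_sized(&u32, 1);   /* case 4: add_u32_sized() -> single addl */
    add_sized(&u64, 1);   /* case 8: add_u64_sized() -> single addq */
}

None of these carry a lock prefix, so concurrent writers to the same
location can still lose updates; the guarantee is only about the width
of the accesses.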
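For the ticket-lock motivation, here is a minimal sketch of why a
non-atomic but correctly-sized add is enough.  The lock layout and
function below are illustrative assumptions (the real lock changes come
later in this series), and add_sized() again comes from the patched
header:

#include <stdint.h>
#include <asm/atomic.h>

/* Illustrative ticket lock: head and tail packed into one word. */
typedef union {
    uint32_t head_tail;      /* whole word, for a locked xadd when
                                a waiter takes a ticket */
    struct {
        uint16_t head;       /* ticket currently being served */
        uint16_t tail;       /* next free ticket */
    };
} ticket_lock_t;

/*
 * Only the lock holder writes head, so no locked read-modify-write
 * is needed to increment it.  What must hold is that the store
 * touches head alone: add_sized() emits a 16-bit addw on head, so a
 * waiter concurrently doing a locked xadd on head_tail never sees
 * tail rewritten with a stale value.  A plain head++ would give the
 * compiler licence to use a wider read-modify-write covering tail.
 */
static inline void ticket_unlock(ticket_lock_t *lock)
{
    add_sized(&lock->head, 1);
}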