x86: simplify a few macros / inline functions

Drop pointless casts and write_atomic()'s bogus and unused "return
value".

Signed-off-by: Jan Beulich

--- a/xen/include/asm-x86/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -17,12 +17,11 @@ static inline void name(volatile type *a
 build_read_atomic(read_u8_atomic, "b", uint8_t, "=q", )
 build_read_atomic(read_u16_atomic, "w", uint16_t, "=r", )
 build_read_atomic(read_u32_atomic, "l", uint32_t, "=r", )
+build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
 
 build_write_atomic(write_u8_atomic, "b", uint8_t, "q", )
 build_write_atomic(write_u16_atomic, "w", uint16_t, "r", )
 build_write_atomic(write_u32_atomic, "l", uint32_t, "r", )
-
-build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
 build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
 
 #undef build_read_atomic
@@ -30,29 +29,28 @@ build_write_atomic(write_u64_atomic, "q"
 
 void __bad_atomic_size(void);
 
-#define read_atomic(p) ({                                                  \
-    unsigned long x_;                                                      \
-    switch ( sizeof(*(p)) ) {                                              \
-    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;                    \
-    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break;                  \
-    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break;                  \
-    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break;                  \
-    default: x_ = 0; __bad_atomic_size(); break;                           \
-    }                                                                      \
-    (typeof(*(p)))x_;                                                      \
+#define read_atomic(p) ({                                 \
+    unsigned long x_;                                     \
+    switch ( sizeof(*(p)) ) {                             \
+    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;   \
+    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break; \
+    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break; \
+    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break; \
+    default: x_ = 0; __bad_atomic_size(); break;          \
+    }                                                     \
+    (typeof(*(p)))x_;                                     \
 })
 
-#define write_atomic(p, x) ({                                              \
-    typeof(*(p)) __x = (x);                                                \
-    unsigned long x_ = (unsigned long)__x;                                 \
-    switch ( sizeof(*(p)) ) {                                              \
-    case 1: write_u8_atomic((uint8_t *)(p), (uint8_t)x_); break;           \
-    case 2: write_u16_atomic((uint16_t *)(p), (uint16_t)x_); break;        \
-    case 4: write_u32_atomic((uint32_t *)(p), (uint32_t)x_); break;        \
-    case 8: write_u64_atomic((uint64_t *)(p), (uint64_t)x_); break;        \
-    default: __bad_atomic_size(); break;                                   \
-    }                                                                      \
-    __x;                                                                   \
+#define write_atomic(p, x) ({                             \
+    typeof(*(p)) __x = (x);                               \
+    unsigned long x_ = (unsigned long)__x;                \
+    switch ( sizeof(*(p)) ) {                             \
+    case 1: write_u8_atomic((uint8_t *)(p), x_); break;   \
+    case 2: write_u16_atomic((uint16_t *)(p), x_); break; \
+    case 4: write_u32_atomic((uint32_t *)(p), x_); break; \
+    case 8: write_u64_atomic((uint64_t *)(p), x_); break; \
+    default: __bad_atomic_size(); break;                  \
+    }                                                     \
 })
 
 /*
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -41,25 +41,25 @@ static always_inline unsigned long __xch
     case 1:
         asm volatile ( "xchgb %b0,%1"
                        : "=q" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 2:
         asm volatile ( "xchgw %w0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 4:
         asm volatile ( "xchgl %k0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 8:
         asm volatile ( "xchgq %0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     }
@@ -81,28 +81,28 @@ static always_inline unsigned long __cmp
     case 1:
         asm volatile ( "lock; cmpxchgb %b1,%2"
                        : "=a" (prev)
-                       : "q" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "q" (new), "m" (*__xg(ptr)),
                          "0" (old)
                        : "memory" );
        return prev;
     case 2:
         asm volatile ( "lock; cmpxchgw %w1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                          "0" (old)
                        : "memory" );
         return prev;
     case 4:
         asm volatile ( "lock; cmpxchgl %k1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                          "0" (old)
                        : "memory" );
         return prev;
     case 8:
         asm volatile ( "lock; cmpxchgq %1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                          "0" (old)
                        : "memory" );
         return prev;