Re: [Xen-devel] [PATCH 2/2] x86: consolidate atomic build_*() macros
On 09/06/17 13:44, Jan Beulich wrote:
> Use a single macro to define both read and write inline functions.
> Avoid redundant inputs (including quotes - use stringification
> instead). Generalize "add" to ease eventual addition of other
> arithmetic operations.
>
> At once correct the arithmetic asm()'s output constraint (needs to be
> "+" instead of "="), its 64-bit immediate one, and permit suitable
> immediates for writes.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
I can appreciate what you are trying to do here, but it is a step
backwards in terms of readability. At least in the past, if you needed
to find the implementation of read_u16_atomic, you could grep for it.
It's now completely hidden from any tools.
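
For comparison, this is roughly what build_rw_atomic(16, w, r, i) expands
to (a hand expansion for illustration, not the literal preprocessor
output):

/* Hand-expanded sketch of build_rw_atomic(16, w, r, i). */
static inline uint16_t read_u16_atomic(const volatile uint16_t *addr)
{
    uint16_t ret;
    /* "mov" #size and "=" #reg paste together into "movw" and "=r". */
    asm volatile ( "movw %1,%0" : "=r" (ret) : "m" (*addr) );
    return ret;
}

static inline void write_u16_atomic(volatile uint16_t *addr, uint16_t val)
{
    /* #reg #imm pastes into "ri", so writes now accept immediates too. */
    asm volatile ( "movw %1,%0" : "=m" (*addr) : "ri" (val) );
}

The function names only come into existence through token pasting, which
is exactly what defeats grep, tags and similar tools.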
~Andrew
>
> --- a/xen/include/asm-x86/atomic.h
> +++ b/xen/include/asm-x86/atomic.h
> @@ -4,46 +4,41 @@
> #include <xen/atomic.h>
> #include <asm/system.h>
>
> -#define build_read_atomic(name, size, type, reg) \
> -static inline type name(const volatile type *addr) \
> +#define build_rw_atomic(width, size, reg, imm) \
> +static inline uint ## width ## _t \
> +read_u ## width ## _atomic(const volatile uint ## width ## _t *addr) \
> { \
> - type ret; \
> - asm volatile ( "mov" size " %1,%0" : reg (ret) : "m" (*addr) ); \
> + uint ## width ## _t ret; \
> + asm volatile ( "mov" #size " %1,%0" : "=" #reg (ret) : "m" (*addr) ); \
> return ret; \
> +} \
> +static inline void \
> +write_u ## width ## _atomic(volatile uint ## width ## _t *addr, \
> + uint ## width ## _t val) \
> +{ \
> + asm volatile ( "mov" #size " %1,%0" : "=m" (*addr) : #reg #imm (val) ); \
> }
>
> -#define build_write_atomic(name, size, type, reg) \
> -static inline void name(volatile type *addr, type val) \
> +#define build_arith_sized(op, width, size, input) \
> +static inline void \
> +op ## _u ## width ## _sized(volatile uint ## width ## _t *addr, \
> + uint ## width ## _t val) \
> { \
> - asm volatile ( "mov" size " %1,%0" : "=m" (*addr) : reg (val) ); \
> + asm volatile ( #op #size " %1,%0" : "+m" (*addr) : #input (val) ); \
> }
>
> -#define build_add_sized(name, size, type, reg) \
> - static inline void name(volatile type *addr, type val) \
> - { \
> - asm volatile("add" size " %1,%0" \
> - : "=m" (*addr) \
> - : reg (val)); \
> - }
> -
> -build_read_atomic(read_u8_atomic, "b", uint8_t, "=q")
> -build_read_atomic(read_u16_atomic, "w", uint16_t, "=r")
> -build_read_atomic(read_u32_atomic, "l", uint32_t, "=r")
> -build_read_atomic(read_u64_atomic, "q", uint64_t, "=r")
> -
> -build_write_atomic(write_u8_atomic, "b", uint8_t, "q")
> -build_write_atomic(write_u16_atomic, "w", uint16_t, "r")
> -build_write_atomic(write_u32_atomic, "l", uint32_t, "r")
> -build_write_atomic(write_u64_atomic, "q", uint64_t, "r")
> -
> -build_add_sized(add_u8_sized, "b", uint8_t, "qi")
> -build_add_sized(add_u16_sized, "w", uint16_t, "ri")
> -build_add_sized(add_u32_sized, "l", uint32_t, "ri")
> -build_add_sized(add_u64_sized, "q", uint64_t, "ri")
> -
> -#undef build_read_atomic
> -#undef build_write_atomic
> -#undef build_add_sized
> +build_rw_atomic( 8, b, q, i)
> +build_rw_atomic(16, w, r, i)
> +build_rw_atomic(32, l, r, i)
> +build_rw_atomic(64, q, r, e)
> +
> +build_arith_sized(add, 8, b, qi)
> +build_arith_sized(add, 16, w, ri)
> +build_arith_sized(add, 32, l, ri)
> +build_arith_sized(add, 64, q, re)
> +
> +#undef build_rw_atomic
> +#undef build_arith_sized
>
> void __bad_atomic_size(void);
>
>
>
>
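
For reference, the constraint corrections the description mentions boil
down to something like this (a hand-written illustration, not code from
the patch; the _sketch name is made up):

/*
 * Illustration of the corrected constraints for the 64-bit add
 * (uint64_t as provided by xen/types.h or <stdint.h>):
 *  - "+m": *addr is both read and written by "add", so it must be an
 *    input/output operand rather than a write-only "=m" one;
 *  - "re": a register, or a constant representable as a sign-extended
 *    32-bit immediate ("e"), since addq cannot encode the full 64-bit
 *    immediates that a plain "i" would permit.
 */
static inline void add_u64_sized_sketch(volatile uint64_t *addr, uint64_t val)
{
    asm volatile ( "addq %1,%0" : "+m" (*addr) : "re" (val) );
}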
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel