[PATCH v6 3/9] xen/riscv: allow write_atomic() to work with non-scalar types
Update the 2nd argument of _write_atomic() from 'unsigned long x' to
'void *x' so that write_atomic() can handle non-scalar types, aligning
it with read_atomic(), which already works with non-scalar types.

Additionally, update the implementation of _add_sized() to use
"writeX_cpu(readX_cpu(p) + x, p)" instead of
"write_atomic(ptr, read_atomic(ptr) + x)", because 'ptr' is defined as
'volatile uintX_t *'. This avoids the compilation error that would
otherwise occur when passing the 2nd argument to _write_atomic()
("passing argument 2 of '_write_atomic' discards 'volatile' qualifier
from pointer target type"), now that the 2nd argument of
_write_atomic() is 'void *' instead of 'unsigned long'.

Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
---
Changes in v6:
 - new patch.
---
 xen/arch/riscv/include/asm/atomic.h | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/xen/arch/riscv/include/asm/atomic.h b/xen/arch/riscv/include/asm/atomic.h
index 3c6bd86406..92b92fb4d4 100644
--- a/xen/arch/riscv/include/asm/atomic.h
+++ b/xen/arch/riscv/include/asm/atomic.h
@@ -54,16 +54,16 @@ static always_inline void read_atomic_size(const volatile void *p,
 })
 
 static always_inline void _write_atomic(volatile void *p,
-                                        unsigned long x,
+                                        void *x,
                                         unsigned int size)
 {
     switch ( size )
     {
-    case 1: writeb_cpu(x, p); break;
-    case 2: writew_cpu(x, p); break;
-    case 4: writel_cpu(x, p); break;
+    case 1: writeb_cpu(*(uint8_t *)x, p); break;
+    case 2: writew_cpu(*(uint16_t *)x, p); break;
+    case 4: writel_cpu(*(uint32_t *)x, p); break;
 #ifndef CONFIG_RISCV_32
-    case 8: writeq_cpu(x, p); break;
+    case 8: writeq_cpu(*(uint64_t *)x, p); break;
 #endif
     default: __bad_atomic_size(); break;
     }
@@ -72,7 +72,7 @@ static always_inline void _write_atomic(volatile void *p,
 #define write_atomic(p, x)                              \
 ({                                                      \
     typeof(*(p)) x_ = (x);                              \
-    _write_atomic(p, x_, sizeof(*(p)));                 \
+    _write_atomic(p, &x_, sizeof(*(p)));                \
 })
 
 static always_inline void _add_sized(volatile void *p,
@@ -82,27 +82,23 @@ static always_inline void _add_sized(volatile void *p,
     {
     case 1:
     {
-        volatile uint8_t *ptr = p;
-        write_atomic(ptr, read_atomic(ptr) + x);
+        writeb_cpu(readb_cpu(p) + x, p);
         break;
     }
     case 2:
     {
-        volatile uint16_t *ptr = p;
-        write_atomic(ptr, read_atomic(ptr) + x);
+        writew_cpu(readw_cpu(p) + x, p);
         break;
     }
     case 4:
     {
-        volatile uint32_t *ptr = p;
-        write_atomic(ptr, read_atomic(ptr) + x);
+        writel_cpu(readl_cpu(p) + x, p);
         break;
    }
 #ifndef CONFIG_RISCV_32
     case 8:
     {
-        volatile uint64_t *ptr = p;
-        write_atomic(ptr, read_atomic(ptr) + x);
+        writeq_cpu(readq_cpu(p) + x, p);
         break;
     }
 #endif
-- 
2.46.0
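
For illustration, below is a minimal standalone sketch (not Xen code) of the
pattern the patch enables: because the low-level helper now takes the value
by address, the write_atomic()-style wrapper can pass a non-scalar type of a
supported size, mirroring read_atomic(). The names demo_write_atomic(),
demo_write() and struct pte are hypothetical; plain volatile stores stand in
for Xen's writeX_cpu() accessors, and GNU C extensions (typeof, statement
expressions) are assumed, as in the Xen headers.

#include <stdint.h>
#include <stdio.h>

/* Size-based dispatch, taking the value by address so any type of a
 * supported size can be passed (stand-in for _write_atomic()). */
static inline void demo_write_atomic(volatile void *p, void *x,
                                     unsigned int size)
{
    switch ( size )
    {
    case 1: *(volatile uint8_t  *)p = *(uint8_t  *)x; break;
    case 2: *(volatile uint16_t *)p = *(uint16_t *)x; break;
    case 4: *(volatile uint32_t *)p = *(uint32_t *)x; break;
    case 8: *(volatile uint64_t *)p = *(uint64_t *)x; break;
    default: /* Xen would call __bad_atomic_size() here */ break;
    }
}

/* The temporary x_ gives the helper an addressable copy of the value,
 * whatever its type, as long as its size matches one of the cases. */
#define demo_write(p, x)                        \
({                                              \
    typeof(*(p)) x_ = (x);                      \
    demo_write_atomic(p, &x_, sizeof(*(p)));    \
})

struct pte { uint64_t bits; };   /* hypothetical non-scalar type */

int main(void)
{
    uint32_t word = 0;
    struct pte e = { .bits = 0xabcdULL }, slot = { 0 };

    /* Works for scalars ... */
    demo_write(&word, 42u);

    /* ... and for a structure of a supported size, which an
     * 'unsigned long x' parameter could not accept. */
    demo_write(&slot, e);

    printf("word=%u slot.bits=%llx\n",
           (unsigned)word, (unsigned long long)slot.bits);
    return 0;
}

With the previous 'unsigned long x' prototype, the second demo_write() call
would not compile, since a structure cannot be converted to an integer;
passing &x_ keeps the size-based dispatch while lifting that restriction.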