[Xen-changelog] [xen staging-4.8] xen/arm64: cmpxchg: Simplify the cmpxchg implementation
commit e0d6cde61d2c9483f7f107d67138c9b92eb84937
Author:     Julien Grall <julien.grall@xxxxxxx>
AuthorDate: Wed May 22 13:37:53 2019 -0700
Commit:     Julien Grall <julien.grall@xxxxxxx>
CommitDate: Fri Jun 14 15:46:00 2019 +0100

    xen/arm64: cmpxchg: Simplify the cmpxchg implementation

    The only difference between each case of the cmpxchg is the size used.
    Rather than duplicating the code, provide a macro to generate each
    case. This makes the code easier to read and modify.

    This is part of XSA-295.

    Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
    Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
    Signed-off-by: Stefano Stabellini <stefanos@xxxxxxxxxx>
---
 xen/include/asm-arm/arm64/cmpxchg.h | 98 ++++++++++++++-----------------------
 1 file changed, 36 insertions(+), 62 deletions(-)

diff --git a/xen/include/asm-arm/arm64/cmpxchg.h b/xen/include/asm-arm/arm64/cmpxchg.h
index ae42b2f5ff..393fbca6a5 100644
--- a/xen/include/asm-arm/arm64/cmpxchg.h
+++ b/xen/include/asm-arm/arm64/cmpxchg.h
@@ -61,80 +61,54 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	__ret; \
 })
 
-extern void __bad_cmpxchg(volatile void *ptr, int size);
+extern unsigned long __bad_cmpxchg(volatile void *ptr, int size);
+
+#define __CMPXCHG_CASE(w, sz, name)					\
+static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
+						   unsigned long old,	\
+						   unsigned long new)	\
+{									\
+	unsigned long res, oldval;					\
+									\
+	do {								\
+		asm volatile("// __cmpxchg_case_" #name "\n"		\
+		"	ldxr" #sz "	%" #w "1, %2\n"			\
+		"	mov	%w0, #0\n"				\
+		"	cmp	%" #w "1, %" #w "3\n"			\
+		"	b.ne	1f\n"					\
+		"	stxr" #sz "	%w0, %" #w "4, %2\n"		\
+		"1:\n"							\
+		: "=&r" (res), "=&r" (oldval),				\
+		  "+Q" (*(unsigned long *)ptr)				\
+		: "Ir" (old), "r" (new)					\
+		: "cc");						\
+	} while (res);							\
+									\
+	return oldval;							\
+}
+
+__CMPXCHG_CASE(w, b, 1)
+__CMPXCHG_CASE(w, h, 2)
+__CMPXCHG_CASE(w,  , 4)
+__CMPXCHG_CASE( ,  , 8)
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
 {
-	unsigned long oldval = 0, res;
-
 	switch (size) {
 	case 1:
-		do {
-			asm volatile("// __cmpxchg1\n"
-			"	ldxrb	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxrb	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_1(ptr, old, new);
 	case 2:
-		do {
-			asm volatile("// __cmpxchg2\n"
-			"	ldxrh	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxrh	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_2(ptr, old, new);
 	case 4:
-		do {
-			asm volatile("// __cmpxchg4\n"
-			"	ldxr	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxr	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_4(ptr, old, new);
 	case 8:
-		do {
-			asm volatile("// __cmpxchg8\n"
-			"	ldxr	%1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%1, %3\n"
-			"	b.ne	1f\n"
-			"	stxr	%w0, %4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_8(ptr, old, new);
 	default:
-		__bad_cmpxchg(ptr, size);
-		oldval = 0;
+		return __bad_cmpxchg(ptr, size);
 	}
 
-	return oldval;
+	ASSERT_UNREACHABLE();
 }
 
 static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
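As a reading aid, below is a rough manual expansion of __CMPXCHG_CASE(w, b, 1) from the hunk above; the layout and the explanatory C comment are added here for clarity and are not part of the patch. It is equivalent to the open-coded __cmpxchg1 that the patch deletes, except that the memory operand is now referenced through an unsigned long * rather than a u8 *:

	/*
	 * Sketch of the code generated by __CMPXCHG_CASE(w, b, 1): a
	 * byte-wide compare-and-swap built on the exclusive load/store
	 * pair ldxrb/stxrb. res is non-zero only when the
	 * store-exclusive fails, so the loop retries until the update
	 * either succeeds or the comparison fails; oldval returns the
	 * value observed in memory.
	 */
	static inline unsigned long __cmpxchg_case_1(volatile void *ptr,
						     unsigned long old,
						     unsigned long new)
	{
		unsigned long res, oldval;

		do {
			asm volatile("// __cmpxchg_case_1\n"
			"	ldxrb	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrb	%w0, %w4, %2\n"
			"1:\n"
			: "=&r" (res), "=&r" (oldval),
			  "+Q" (*(unsigned long *)ptr)
			: "Ir" (old), "r" (new)
			: "cc");
		} while (res);

		return oldval;
	}

The 2-, 4- and 8-byte variants generated by the other three invocations differ only in the load/store suffix (ldxrh/stxrh, or plain ldxr/stxr) and in whether the 32-bit w register names are used for the data operands.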
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.8