[PATCH v3 2/6] crypto/vmac: Simplify code with byteswap
This file has its own implementation of swap bytes. Clean up the code
with xen/byteswap.h. No functional change.

Signed-off-by: Lin Liu <lin.liu@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Julien Grall <julien@xxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Wei Liu <wl@xxxxxxx>
---
 xen/crypto/vmac.c | 76 ++---------------------------------------------
 1 file changed, 3 insertions(+), 73 deletions(-)

diff --git a/xen/crypto/vmac.c b/xen/crypto/vmac.c
index 294dd16a52..acb4e015f5 100644
--- a/xen/crypto/vmac.c
+++ b/xen/crypto/vmac.c
@@ -8,6 +8,7 @@
 
 /* start for Xen */
 #include <xen/init.h>
+#include <xen/byteswap.h>
 #include <xen/types.h>
 #include <xen/lib.h>
 #include <crypto/vmac.h>
@@ -50,7 +51,6 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
  * MUL64: 64x64->128-bit multiplication
  * PMUL64: assumes top bits cleared on inputs
  * ADD128: 128x128->128-bit addition
- * GET_REVERSED_64: load and byte-reverse 64-bit word
  * ----------------------------------------------------------------------- */
 
 /* ----------------------------------------------------------------------- */
@@ -68,22 +68,6 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
 
 #define PMUL64 MUL64
 
-#define GET_REVERSED_64(p)                                                \
-    ({uint64_t x;                                                         \
-     asm ("bswapq %0" : "=r" (x) : "0"(*(uint64_t *)(p))); x;})
-
-/* ----------------------------------------------------------------------- */
-#elif (__GNUC__ && __i386__)
-/* ----------------------------------------------------------------------- */
-
-#define GET_REVERSED_64(p)                                                \
-    ({ uint64_t x;                                                        \
-    uint32_t *tp = (uint32_t *)(p);                                       \
-    asm ("bswap %%edx\n\t"                                                \
-         "bswap %%eax"                                                    \
-    : "=A"(x)                                                             \
-    : "a"(tp[1]), "d"(tp[0]));                                            \
-    x; })
 /* ----------------------------------------------------------------------- */
 #elif (__GNUC__ && __ppc64__)
 /* ----------------------------------------------------------------------- */
@@ -103,37 +87,6 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
 
 #define PMUL64 MUL64
 
-#define GET_REVERSED_64(p)                                                \
-    ({ uint32_t hi, lo, *_p = (uint32_t *)(p);                            \
-       asm volatile ("lwbrx %0, %1, %2" : "=r"(lo) : "b%"(0), "r"(_p) );  \
-       asm volatile ("lwbrx %0, %1, %2" : "=r"(hi) : "b%"(4), "r"(_p) );  \
-       ((uint64_t)hi << 32) | (uint64_t)lo; } )
-
-/* ----------------------------------------------------------------------- */
-#elif (__GNUC__ && (__ppc__ || __PPC__))
-/* ----------------------------------------------------------------------- */
-
-#define GET_REVERSED_64(p)                                                \
-    ({ uint32_t hi, lo, *_p = (uint32_t *)(p);                            \
-       asm volatile ("lwbrx %0, %1, %2" : "=r"(lo) : "b%"(0), "r"(_p) );  \
-       asm volatile ("lwbrx %0, %1, %2" : "=r"(hi) : "b%"(4), "r"(_p) );  \
-       ((uint64_t)hi << 32) | (uint64_t)lo; } )
-
-/* ----------------------------------------------------------------------- */
-#elif (__GNUC__ && (__ARMEL__ || __ARM__))
-/* ----------------------------------------------------------------------- */
-
-#define bswap32(v)                                                        \
-({ uint32_t tmp,out;                                                      \
-    asm volatile(                                                         \
-        "eor %1, %2, %2, ror #16\n"                                       \
-        "bic %1, %1, #0x00ff0000\n"                                       \
-        "mov %0, %2, ror #8\n"                                            \
-        "eor %0, %0, %1, lsr #8"                                          \
-    : "=r" (out), "=&r" (tmp)                                             \
-    : "r" (v));                                                           \
-    out;})
-
 /* ----------------------------------------------------------------------- */
 #elif _MSC_VER
 /* ----------------------------------------------------------------------- */
@@ -154,11 +107,6 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
         (rh) += (ih) + ((rl) < (_il));                                    \
     }
 
-#if _MSC_VER >= 1300
-#define GET_REVERSED_64(p) _byteswap_uint64(*(uint64_t *)(p))
-#pragma intrinsic(_byteswap_uint64)
-#endif
-
 #if _MSC_VER >= 1400 && \
     (!defined(__INTEL_COMPILER) || __INTEL_COMPILER >= 1000)
 #define MUL32(i1,i2) (__emulu((uint32_t)(i1),(uint32_t)(i2)))
@@ -219,24 +167,6 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
     }
 #endif
 
-#ifndef GET_REVERSED_64
-#ifndef bswap64
-#ifndef bswap32
-#define bswap32(x)                                                        \
-    ({ uint32_t bsx = (x);                                                \
-       ((((bsx) & 0xff000000u) >> 24) | (((bsx) & 0x00ff0000u) >> 8) |    \
-        (((bsx) & 0x0000ff00u) << 8) | (((bsx) & 0x000000ffu) << 24)); })
-#endif
-#define bswap64(x)                                                        \
-    ({ union { uint64_t ll; uint32_t l[2]; } w, r;                        \
-       w.ll = (x);                                                        \
-       r.l[0] = bswap32 (w.l[1]);                                         \
-       r.l[1] = bswap32 (w.l[0]);                                         \
-       r.ll; })
-#endif
-#define GET_REVERSED_64(p) bswap64(*(uint64_t *)(p))
-#endif
-
 /* ----------------------------------------------------------------------- */
 
 #if (VMAC_PREFER_BIG_ENDIAN)
@@ -247,9 +177,9 @@ const uint64_t mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
 
 #if (VMAC_ARCH_BIG_ENDIAN)
 # define get64BE(ptr) (*(uint64_t *)(ptr))
-# define get64LE(ptr) GET_REVERSED_64(ptr)
+# define get64LE(ptr) bswap64(*(uint64_t *)(ptr))
 #else /* assume little-endian */
-# define get64BE(ptr) GET_REVERSED_64(ptr)
+# define get64BE(ptr) bswap64(*(uint64_t *)(ptr))
 # define get64LE(ptr) (*(uint64_t *)(ptr))
 #endif
-- 
2.27.0
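For readers unfamiliar with the helper the patch switches to: bswap64() simply
reverses the byte order of a 64-bit value, which is what the removed
GET_REVERSED_64() macros did with per-architecture asm. The sketch below is a
stand-alone plain-C illustration of that operation, not a copy of Xen's
xen/byteswap.h; the name demo_bswap64() and the sample bytes are invented for
this example (optimising compilers typically lower such a function to
__builtin_bswap64() or a single bswap/rev instruction).

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* Reverse the byte order of a 64-bit value, e.g. 0x0123456789abcdef
 * becomes 0xefcdab8967452301. */
static uint64_t demo_bswap64(uint64_t x)
{
    return ((x & UINT64_C(0x00000000000000ff)) << 56) |
           ((x & UINT64_C(0x000000000000ff00)) << 40) |
           ((x & UINT64_C(0x0000000000ff0000)) << 24) |
           ((x & UINT64_C(0x00000000ff000000)) <<  8) |
           ((x & UINT64_C(0x000000ff00000000)) >>  8) |
           ((x & UINT64_C(0x0000ff0000000000)) >> 24) |
           ((x & UINT64_C(0x00ff000000000000)) >> 40) |
           ((x & UINT64_C(0xff00000000000000)) >> 56);
}

int main(void)
{
    /* get64BE()-style use on a little-endian host: load eight bytes and
     * byte-reverse the word so it is interpreted as big-endian. */
    unsigned char buf[8] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };
    uint64_t v;

    memcpy(&v, buf, sizeof(v));          /* alignment-safe load */
    printf("raw: %016" PRIx64 "  byte-reversed: %016" PRIx64 "\n",
           v, demo_bswap64(v));
    return 0;
}

Expressing the swap once in a generic helper, as the patch does by relying on
xen/byteswap.h, lets the compiler choose the best instruction for each target
instead of maintaining hand-written asm per architecture.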