[Xen-devel] [PATCH v9 23/23] x86emul: add a PCLMUL/VPCLMUL test case to the harness
Also use this for AVX512_VBMI2 VPSH{L,R}D{,V}{D,Q,W} testing (only the
quad word right shifts get actually used; the assumption is that their
"left" counterparts as well as the double word and word forms then work
as well).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v8: New.

--- a/tools/tests/x86_emulator/Makefile
+++ b/tools/tests/x86_emulator/Makefile
@@ -20,9 +20,10 @@ SIMD := 3dnow sse sse2 sse4 avx avx2 xop
 FMA := fma4 fma
 SG := avx2-sg avx512f-sg avx512vl-sg
 AES := ssse3-aes avx-aes avx2-vaes avx512bw-vaes
+CLMUL := ssse3-pclmul avx-pclmul avx2-vpclmulqdq avx512bw-vpclmulqdq avx512vbmi2-vpclmulqdq
 SHA := sse4-sha avx-sha avx512f-sha
 GF := sse2-gf avx2-gf avx512bw-gf
-TESTCASES := blowfish $(SIMD) $(FMA) $(SG) $(AES) $(SHA) $(GF)
+TESTCASES := blowfish $(SIMD) $(FMA) $(SG) $(AES) $(CLMUL) $(SHA) $(GF)
 
 OPMASK := avx512f avx512dq avx512bw
 
@@ -89,6 +90,7 @@ avx512er-flts := 4 8
 avx512vbmi-vecs := $(avx512bw-vecs)
 avx512vbmi-ints := $(avx512bw-ints)
 avx512vbmi-flts := $(avx512bw-flts)
+avx512vbmi2-vecs := $(avx512bw-vecs)
 
 avx512f-opmask-vecs := 2
 avx512dq-opmask-vecs := 1 2
@@ -149,6 +151,10 @@ define simd-aes-defs
 $(1)-cflags := $(foreach vec,$($(patsubst %-aes,sse,$(1))-vecs) $($(patsubst %-vaes,%,$(1))-vecs), \
 	         "-D_$(vec) -maes $(addprefix -m,$(subst -,$(space),$(1))) $(call non-sse,$(1)) -Os -DVEC_SIZE=$(vec)")
 endef
+define simd-clmul-defs
+$(1)-cflags := $(foreach vec,$($(patsubst %-pclmul,sse,$(1))-vecs) $($(patsubst %-vpclmulqdq,%,$(1))-vecs), \
+	         "-D_$(vec) -mpclmul $(addprefix -m,$(subst -,$(space),$(1))) $(call non-sse,$(1)) -Os -DVEC_SIZE=$(vec)")
+endef
 define simd-sha-defs
 $(1)-cflags := $(foreach vec,$(sse-vecs), \
 	         "-D_$(vec) $(addprefix -m,$(subst -,$(space),$(1))) -Os -DVEC_SIZE=$(vec)")
@@ -164,6 +170,7 @@ endef
 $(foreach flavor,$(SIMD) $(FMA),$(eval $(call simd-defs,$(flavor))))
 $(foreach flavor,$(SG),$(eval $(call simd-sg-defs,$(flavor))))
 $(foreach flavor,$(AES),$(eval $(call simd-aes-defs,$(flavor))))
+$(foreach flavor,$(CLMUL),$(eval $(call simd-clmul-defs,$(flavor))))
 $(foreach flavor,$(SHA),$(eval $(call simd-sha-defs,$(flavor))))
 $(foreach flavor,$(GF),$(eval $(call simd-gf-defs,$(flavor))))
 $(foreach flavor,$(OPMASK),$(eval $(call opmask-defs,$(flavor))))
@@ -218,13 +225,16 @@ $(addsuffix .c,$(SG)):
 $(addsuffix .c,$(AES)):
 	ln -sf simd-aes.c $@
 
+$(addsuffix .c,$(CLMUL)):
+	ln -sf simd-clmul.c $@
+
 $(addsuffix .c,$(SHA)):
 	ln -sf simd-sha.c $@
 
 $(addsuffix .c,$(GF)):
 	ln -sf simd-gf.c $@
 
-$(addsuffix .h,$(SIMD) $(FMA) $(SG) $(AES) $(SHA) $(GF)): simd.h
+$(addsuffix .h,$(SIMD) $(FMA) $(SG) $(AES) $(CLMUL) $(SHA) $(GF)): simd.h
 
 xop.h avx512f.h: simd-fma.c
--- /dev/null
+++ b/tools/tests/x86_emulator/simd-clmul.c
@@ -0,0 +1,150 @@
+#define UINT_SIZE 8
+
+#include "simd.h"
+ENTRY(clmul_test);
+
+#ifdef __AVX512F__ /* AVX512BW may get enabled only below */
+# define ALL_TRUE (~0ULL >> (64 - ELEM_COUNT))
+# define eq(x, y) (B(pcmpeqq, _mask, (vdi_t)(x), (vdi_t)(y), -1) == ALL_TRUE)
+# define lane_shr_unit(x) \
+    ((vec_t)B(palignr, _mask, (vdi_t)(x), (vdi_t)(x), 64, (vdi_t){}, \
+              0x00ff00ff00ff00ffULL & (~0ULL >> (64 - VEC_SIZE))))
+#else
+# if defined(__AVX2__) && VEC_SIZE == 32
+#  define to_bool(cmp) B(ptestc, , cmp, (vdi_t){} == 0)
+# else
+#  define to_bool(cmp) (__builtin_ia32_pmovmskb128(cmp) == 0xffff)
+# endif
+# define eq(x, y) to_bool((x) == (y))
+# define lane_shr_unit(x) ((vec_t)B(palignr, , (vdi_t){}, (vdi_t)(x), 64))
+#endif
+
+#define CLMUL(op, x, y, c) (vec_t)(__builtin_ia32_ ## op((vdi_t)(x), (vdi_t)(y), c))
+
+#if VEC_SIZE == 16
+# define clmul(x, y, c) CLMUL(pclmulqdq128, x, y, c)
+# define vpshrd __builtin_ia32_vpshrd_v2di
+#elif VEC_SIZE == 32
+# define clmul(x, y, c) CLMUL(vpclmulqdq_v4di, x, y, c)
+# define vpshrd __builtin_ia32_vpshrd_v4di
+#elif VEC_SIZE == 64
+# define clmul(x, y, c) CLMUL(vpclmulqdq_v8di, x, y, c)
+# define vpshrd __builtin_ia32_vpshrd_v8di
+#endif
+
+#define clmul_ll(x, y) clmul(x, y, 0x00)
+#define clmul_hl(x, y) clmul(x, y, 0x01)
+#define clmul_lh(x, y) clmul(x, y, 0x10)
+#define clmul_hh(x, y) clmul(x, y, 0x11)
+
+#if defined(__AVX512VBMI2__)
+# pragma GCC target ( "avx512bw" )
+# define lane_shr_i(x, n) ({ \
+    vec_t h_ = lane_shr_unit(x); \
+    touch(h_); \
+    (n) < 64 ? (vec_t)vpshrd((vdi_t)(x), (vdi_t)(h_), n) : h_ >> ((n) - 64); \
+})
+# define lane_shr_v(x, n) ({ \
+    vec_t t_ = (x), h_ = lane_shr_unit(x); \
+    typeof(t_[0]) n_ = (n); \
+    if ( (n) < 64 ) \
+        /* gcc does not support embedded broadcast */ \
+        asm ( "vpshrdvq %2%{1to%c3%}, %1, %0" \
+              : "+v" (t_) : "v" (h_), "m" (n_), "i" (ELEM_COUNT) ); \
+    else \
+        t_ = h_ >> ((n) - 64); \
+    t_; \
+})
+#else
+# define lane_shr_i lane_shr_v
+# define lane_shr_v(x, n) ({ \
+    vec_t t_ = (n) > 0 ? lane_shr_unit(x) : (x); \
+    (n) < 64 ? ((x) >> (n)) | (t_ << (-(n) & 0x3f)) \
+             : t_ >> ((n) - 64); \
+})
+#endif
+
+int clmul_test(void)
+{
+    unsigned int i;
+    vec_t src;
+    vqi_t raw = {};
+
+    for ( i = 1; i < VEC_SIZE; ++i )
+        raw[i] = i;
+    src = (vec_t)raw;
+
+    for ( i = 0; i < 256; i += VEC_SIZE )
+    {
+        vec_t x = {}, y, z, lo, hi;
+        unsigned int j;
+
+        touch(x);
+        y = clmul_ll(src, x);
+        touch(x);
+
+        if ( !eq(y, x) ) return __LINE__;
+
+        for ( j = 0; j < ELEM_COUNT; j += 2 )
+            x[j] = 1;
+
+        touch(src);
+        y = clmul_ll(x, src);
+        touch(src);
+        z = clmul_lh(x, src);
+        touch(src);
+
+        for ( j = 0; j < ELEM_COUNT; j += 2 )
+            y[j + 1] = z[j];
+
+        if ( !eq(y, src) ) return __LINE__;
+
+        /*
+         * Besides the obvious property of the low and high half products
+         * being the same either direction, the "square" of a number has the
+         * property of simply being the original bit pattern with a zero bit
+         * inserted between any two bits. This is what the code below checks.
+         */
+
+        x = src;
+        touch(src);
+        y = clmul_lh(x, src);
+        touch(src);
+        z = clmul_hl(x, src);
+
+        if ( !eq(y, z) ) return __LINE__;
+
+        touch(src);
+        y = lo = clmul_ll(x, src);
+        touch(src);
+        z = hi = clmul_hh(x, src);
+        touch(src);
+
+        for ( j = 0; j < 64; ++j )
+        {
+            vec_t l = lane_shr_v(lo, 2 * j);
+            vec_t h = lane_shr_v(hi, 2 * j);
+            unsigned int n;
+
+            if ( !eq(l, y) ) return __LINE__;
+            if ( !eq(h, z) ) return __LINE__;
+
+            x = src >> j;
+
+            for ( n = 0; n < ELEM_COUNT; n += 2 )
+            {
+                if ( (x[n + 0] & 1) != (l[n] & 3) ) return __LINE__;
+                if ( (x[n + 1] & 1) != (h[n] & 3) ) return __LINE__;
+            }
+
+            touch(y);
+            y = lane_shr_i(y, 2);
+            touch(z);
+            z = lane_shr_i(z, 2);
+        }
+
+        src += 0x0101010101010101ULL * VEC_SIZE;
+    }
+
+    return 0;
+}
--- a/tools/tests/x86_emulator/simd.h
+++ b/tools/tests/x86_emulator/simd.h
@@ -381,6 +381,7 @@ OVR(movntdq);
 OVR(movntdqa);
 OVR(movshdup);
 OVR(movsldup);
+OVR(pclmulqdq);
 OVR(permd);
 OVR(permq);
 OVR(pmovsxbd);
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -13,16 +13,19 @@ asm ( ".pushsection .test, \"ax\", @prog
 #include "sse2.h"
 #include "sse2-gf.h"
 #include "ssse3-aes.h"
+#include "ssse3-pclmul.h"
 #include "sse4.h"
 #include "sse4-sha.h"
 #include "avx.h"
 #include "avx-aes.h"
+#include "avx-pclmul.h"
 #include "avx-sha.h"
 #include "fma4.h"
 #include "fma.h"
 #include "avx2.h"
 #include "avx2-sg.h"
 #include "avx2-vaes.h"
+#include "avx2-vpclmulqdq.h"
 #include "avx2-gf.h"
 #include "xop.h"
 #include "avx512f-opmask.h"
@@ -34,10 +37,12 @@ asm ( ".pushsection .test, \"ax\", @prog
 #include "avx512vl-sg.h"
 #include "avx512bw.h"
 #include "avx512bw-vaes.h"
+#include "avx512bw-vpclmulqdq.h"
 #include "avx512bw-gf.h"
 #include "avx512dq.h"
 #include "avx512er.h"
 #include "avx512vbmi.h"
+#include "avx512vbmi2-vpclmulqdq.h"
 
 #define verbose false /* Switch to true for far more logging. */
 
@@ -108,6 +113,16 @@ static bool simd_check_avx_aes(void)
     return cpu_has_aesni && cpu_has_avx;
 }
 
+static bool simd_check_ssse3_pclmul(void)
+{
+    return cpu_has_pclmulqdq && cpu_has_ssse3;
+}
+
+static bool simd_check_avx_pclmul(void)
+{
+    return cpu_has_pclmulqdq && cpu_has_avx;
+}
+
 static bool simd_check_avx512f(void)
 {
     return cpu_has_avx512f;
@@ -189,6 +204,31 @@ static bool simd_check_avx512bw_vaes_vl(
            cpu_has_avx512bw && cpu_has_avx512vl;
 }
 
+static bool simd_check_avx2_vpclmulqdq(void)
+{
+    return cpu_has_vpclmulqdq && cpu_has_avx2;
+}
+
+static bool simd_check_avx512bw_vpclmulqdq(void)
+{
+    return cpu_has_vpclmulqdq && cpu_has_avx512bw;
+}
+
+static bool simd_check_avx512bw_vpclmulqdq_vl(void)
+{
+    return cpu_has_vpclmulqdq && cpu_has_avx512bw && cpu_has_avx512vl;
+}
+
+static bool simd_check_avx512vbmi2_vpclmulqdq(void)
+{
+    return cpu_has_avx512_vbmi2 && simd_check_avx512bw_vpclmulqdq();
+}
+
+static bool simd_check_avx512vbmi2_vpclmulqdq_vl(void)
+{
+    return cpu_has_avx512_vbmi2 && simd_check_avx512bw_vpclmulqdq_vl();
+}
+
 static bool simd_check_sse2_gf(void)
 {
     return cpu_has_gfni && cpu_has_sse2;
@@ -369,6 +409,8 @@ static const struct {
     SIMD(XOP i64x4,              xop,      32i8),
     SIMD(AES (legacy),     ssse3_aes,        16),
     SIMD(AES (VEX/x16),      avx_aes,        16),
+    SIMD(PCLMUL (legacy), ssse3_pclmul,      16),
+    SIMD(PCLMUL (VEX/x2),   avx_pclmul,      16),
     SIMD(OPMASK/w,     avx512f_opmask,        2),
     SIMD(OPMASK+DQ/b,  avx512dq_opmask,       1),
     SIMD(OPMASK+DQ/w,  avx512dq_opmask,       2),
@@ -475,6 +517,13 @@ static const struct {
     SIMD(VAES (EVEX/x64),   avx512bw_vaes,   64),
     AVX512VL(VL+VAES (x16), avx512bw_vaes,   16),
     AVX512VL(VL+VAES (x32), avx512bw_vaes,   32),
+    SIMD(VPCLMUL (VEX/x4),  avx2_vpclmulqdq, 32),
+    SIMD(VPCLMUL (EVEX/x8), avx512bw_vpclmulqdq, 64),
+    AVX512VL(VL+VPCLMUL (x4), avx512bw_vpclmulqdq, 16),
+    AVX512VL(VL+VPCLMUL (x8), avx512bw_vpclmulqdq, 32),
+    SIMD(AVX512_VBMI2+VPCLMUL (x8), avx512vbmi2_vpclmulqdq, 64),
+    AVX512VL(_VBMI2+VL+VPCLMUL (x2), avx512vbmi2_vpclmulqdq, 16),
+    AVX512VL(_VBMI2+VL+VPCLMUL (x4), avx512vbmi2_vpclmulqdq, 32),
     SIMD(GFNI (legacy),      sse2_gf,        16),
     SIMD(GFNI (VEX/x16),     avx2_gf,        16),
     SIMD(GFNI (VEX/x32),     avx2_gf,        32),
--- a/tools/tests/x86_emulator/x86-emulate.h
+++ b/tools/tests/x86_emulator/x86-emulate.h
@@ -125,6 +125,7 @@ static inline bool xcr0_mask(uint64_t ma
 #define cpu_has_sse        cp.basic.sse
 #define cpu_has_sse2       cp.basic.sse2
 #define cpu_has_sse3       cp.basic.sse3
+#define cpu_has_pclmulqdq  cp.basic.pclmulqdq
 #define cpu_has_ssse3      cp.basic.ssse3
 #define cpu_has_fma        (cp.basic.fma && xcr0_mask(6))
 #define cpu_has_sse4_1     cp.basic.sse4_1
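For readers unfamiliar with the property the in-code comment relies on: over
GF(2), (sum_i a_i X^i)^2 = sum_i a_i X^(2i), because every cross term occurs
twice and cancels, so the carry-less square of a value is just its bit
pattern with a zero bit interleaved between the original bits. Below is a
minimal standalone sketch of that property in plain C; it is not part of the
patch, and the helper names are made up for illustration only.

/* Standalone illustration of the carry-less "squaring" property. */
#include <stdint.h>
#include <stdio.h>

/* Low 64 bits of the carry-less (GF(2)) product of two 64-bit values. */
static uint64_t clmul_lo(uint64_t x, uint64_t y)
{
    uint64_t r = 0;
    unsigned int i;

    for ( i = 0; i < 64; ++i )
        if ( y & (1ULL << i) )
            r ^= x << i;

    return r;
}

/* Insert a zero bit after each of the low 32 bits of x. */
static uint64_t spread(uint32_t x)
{
    uint64_t r = 0;
    unsigned int i;

    for ( i = 0; i < 32; ++i )
        r |= (uint64_t)((x >> i) & 1) << (2 * i);

    return r;
}

int main(void)
{
    uint32_t x = 0x2301efcd; /* arbitrary test pattern */

    printf("clmul(x,x) = %#018llx\n", (unsigned long long)clmul_lo(x, x));
    printf("spread(x)  = %#018llx\n", (unsigned long long)spread(x));

    return clmul_lo(x, x) != spread(x);
}

The harness performs the equivalent check element by element on the vector
products, shifting both the source value and the 128-bit products as it goes.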