[PATCH 1/4] x86emul: support AVX-VNNI-INT16
These are close relatives of the AVX-VNNI and AVX-VNNI-INT8 ISA
extensions. Since the insns here, and in particular their memory access
patterns, follow the usual scheme (and especially the word variants of
AVX-VNNI), I didn't think it was necessary to add a contrived test
specifically for them.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
I'm actually pondering whether to contrive dot_product() for these and
the other AVX-VNNI* insns, by additionally performing a horizontal add
on the result vectors. The main thing I dislike about it is that
everything else in simd_test() would then be re-run for no real
purpose. Yet making a new standalone simd-dotp.c also doesn't look very
attractive.
---
SDE: -arl, -lnl, or -future
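For illustration only, not part of the patch: the pondering above,
sketched in plain C. ref_vpdpwusd() and horizontal_add() are made-up
names, the sketch covers just one 128-bit lane of the non-saturating
unsigned-by-signed form, and the real dot_product() plumbing in the
harness would look different.

#include <stdint.h>

/*
 * Scalar reference for one 128-bit lane of vpdpwusd: four dword
 * accumulators, each gaining two unsigned-word by signed-word products.
 */
static void ref_vpdpwusd(int32_t acc[4], const uint16_t src1[8],
                         const int16_t src2[8])
{
    for ( unsigned int i = 0; i < 4; ++i )
    {
        /* Widen to 64 bits: C-level signed overflow would be UB, while
         * the non-saturating insn forms simply wrap. */
        int64_t sum = (int64_t)acc[i] +
                      (int64_t)src1[2 * i] * src2[2 * i] +
                      (int64_t)src1[2 * i + 1] * src2[2 * i + 1];

        acc[i] = (int32_t)(uint32_t)sum;
    }
}

/* The pondered extra step: fold the result vector into one scalar. */
static int64_t horizontal_add(const int32_t v[4])
{
    return (int64_t)v[0] + v[1] + v[2] + v[3];
}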
--- a/tools/misc/xen-cpuid.c
+++ b/tools/misc/xen-cpuid.c
@@ -217,6 +217,8 @@ static const char *const str_7d1[32] =
 {
     [ 4] = "avx-vnni-int8",       [ 5] = "avx-ne-convert",
 
+    [10] = "avx-vnni-int16",
+
     [18] = "cet-sss",
 };
--- a/tools/tests/x86_emulator/predicates.c
+++ b/tools/tests/x86_emulator/predicates.c
@@ -1397,6 +1397,12 @@ static const struct vex {
     { { 0xbe }, 2, T, R, pfx_66, Wn, Ln }, /* vnmsub231p{s,d} */
     { { 0xbf }, 2, T, R, pfx_66, Wn, LIG }, /* vnmsub231s{s,d} */
     { { 0xcf }, 2, T, R, pfx_66, W0, Ln }, /* vgf2p8mulb */
+    { { 0xd2 }, 2, T, R, pfx_no, W0, Ln }, /* vpdpwuud */
+    { { 0xd2 }, 2, T, R, pfx_66, W0, Ln }, /* vpdpwusd */
+    { { 0xd2 }, 2, T, R, pfx_f3, W0, Ln }, /* vpdpwsud */
+    { { 0xd3 }, 2, T, R, pfx_no, W0, Ln }, /* vpdpwuuds */
+    { { 0xd3 }, 2, T, R, pfx_66, W0, Ln }, /* vpdpwusds */
+    { { 0xd3 }, 2, T, R, pfx_f3, W0, Ln }, /* vpdpwsuds */
     { { 0xdb }, 2, T, R, pfx_66, WIG, L0 }, /* vaesimc */
     { { 0xdc }, 2, T, R, pfx_66, WIG, Ln }, /* vaesenc */
     { { 0xdd }, 2, T, R, pfx_66, WIG, Ln }, /* vaesenclast */
--- a/tools/tests/x86_emulator/x86-emulate.h
+++ b/tools/tests/x86_emulator/x86-emulate.h
@@ -183,6 +183,7 @@ void wrpkru(unsigned int val);
 #define cpu_has_avx_ifma       (cp.feat.avx_ifma && xcr0_mask(6))
 #define cpu_has_avx_vnni_int8  (cp.feat.avx_vnni_int8 && xcr0_mask(6))
 #define cpu_has_avx_ne_convert (cp.feat.avx_ne_convert && xcr0_mask(6))
+#define cpu_has_avx_vnni_int16 (cp.feat.avx_vnni_int16 && xcr0_mask(6))
 
 #define cpu_has_xgetbv1        (cpu_has_xsave && cp.xstate.xgetbv1)
--- a/xen/arch/x86/include/asm/cpufeature.h
+++ b/xen/arch/x86/include/asm/cpufeature.h
@@ -183,6 +183,7 @@ static inline bool boot_cpu_has(unsigned
 /* CPUID level 0x00000007:1.edx */
 #define cpu_has_avx_vnni_int8   boot_cpu_has(X86_FEATURE_AVX_VNNI_INT8)
 #define cpu_has_avx_ne_convert  boot_cpu_has(X86_FEATURE_AVX_NE_CONVERT)
+#define cpu_has_avx_vnni_int16  boot_cpu_has(X86_FEATURE_AVX_VNNI_INT16)
 
 /* MSR_ARCH_CAPS */
 #define cpu_has_rdcl_no         boot_cpu_has(X86_FEATURE_RDCL_NO)
--- a/xen/arch/x86/x86_emulate/decode.c
+++ b/xen/arch/x86/x86_emulate/decode.c
@@ -435,6 +435,8 @@ static const struct ext0f38_table {
     [0xcc] = { .simd_size = simd_packed_fp, .two_op = 1, .d8s = d8s_vl },
     [0xcd] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
     [0xcf] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
+    [0xd2] = { .simd_size = simd_other },
+    [0xd3] = { .simd_size = simd_other },
     [0xd6] = { .simd_size = simd_other, .d8s = d8s_vl },
     [0xd7] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
     [0xdb] = { .simd_size = simd_packed_int, .two_op = 1 },
--- a/xen/arch/x86/x86_emulate/private.h
+++ b/xen/arch/x86/x86_emulate/private.h
@@ -593,6 +593,7 @@ amd_like(const struct x86_emulate_ctxt *
 #define vcpu_has_avx_ifma()       (ctxt->cpuid->feat.avx_ifma)
 #define vcpu_has_avx_vnni_int8()  (ctxt->cpuid->feat.avx_vnni_int8)
 #define vcpu_has_avx_ne_convert() (ctxt->cpuid->feat.avx_ne_convert)
+#define vcpu_has_avx_vnni_int16() (ctxt->cpuid->feat.avx_vnni_int16)
 
 #define vcpu_must_have(feat) \
     generate_exception_if(!vcpu_has_##feat(), X86_EXC_UD)
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -6871,6 +6871,17 @@ x86_emulate(
         elem_bytes = 1;
         goto avx512f_no_sae;
 
+    case X86EMUL_OPC_VEX   (0x0f38, 0xd2): /* vpdpwuud [xy]mm/mem,[xy]mm,[xy]mm */
+    case X86EMUL_OPC_VEX_66(0x0f38, 0xd2): /* vpdpwusd [xy]mm/mem,[xy]mm,[xy]mm */
+    case X86EMUL_OPC_VEX_F3(0x0f38, 0xd2): /* vpdpwsud [xy]mm/mem,[xy]mm,[xy]mm */
+    case X86EMUL_OPC_VEX   (0x0f38, 0xd3): /* vpdpwuuds [xy]mm/mem,[xy]mm,[xy]mm */
+    case X86EMUL_OPC_VEX_66(0x0f38, 0xd3): /* vpdpwusds [xy]mm/mem,[xy]mm,[xy]mm */
+    case X86EMUL_OPC_VEX_F3(0x0f38, 0xd3): /* vpdpwsuds [xy]mm/mem,[xy]mm,[xy]mm */
+        host_and_vcpu_must_have(avx_vnni_int16);
+        generate_exception_if(vex.w, X86_EXC_UD);
+        op_bytes = 16 << vex.l;
+        goto simd_0f_ymm;
+
     case X86EMUL_OPC_VEX_66(0x0f38, 0xdc): /* vaesenc {x,y}mm/mem,{x,y}mm,{x,y}mm */
     case X86EMUL_OPC_VEX_66(0x0f38, 0xdd): /* vaesenclast {x,y}mm/mem,{x,y}mm,{x,y}mm */
     case X86EMUL_OPC_VEX_66(0x0f38, 0xde): /* vaesdec {x,y}mm/mem,{x,y}mm,{x,y}mm */
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -306,6 +306,7 @@ XEN_CPUFEATURE(MCDT_NO, 13*32
 /* Intel-defined CPU features, CPUID level 0x00000007:1.edx, word 15 */
 XEN_CPUFEATURE(AVX_VNNI_INT8,     15*32+ 4) /*A  AVX-VNNI-INT8 Instructions */
 XEN_CPUFEATURE(AVX_NE_CONVERT,    15*32+ 5) /*A  AVX-NE-CONVERT Instructions */
+XEN_CPUFEATURE(AVX_VNNI_INT16,    15*32+10) /*A  AVX-VNNI-INT16 Instructions */
 XEN_CPUFEATURE(CET_SSS,           15*32+18) /*   CET Supervisor Shadow Stacks safe to use */
 
 /* Intel-defined CPU features, MSR_ARCH_CAPS 0x10a.eax, word 16 */
--- a/xen/tools/gen-cpuid.py
+++ b/xen/tools/gen-cpuid.py
@@ -284,7 +284,8 @@ def crunch_numbers(state):
         # feature flags.  If want to use AVX512, AVX2 must be supported and
         # enabled.  Certain later extensions, acting on 256-bit vectors of
         # integers, better depend on AVX2 than AVX.
-        AVX2: [AVX512F, VAES, VPCLMULQDQ, AVX_VNNI, AVX_IFMA, AVX_VNNI_INT8],
+        AVX2: [AVX512F, VAES, VPCLMULQDQ, AVX_VNNI, AVX_IFMA, AVX_VNNI_INT8,
+               AVX_VNNI_INT16],
 
         # AVX512F is taken to mean hardware support for 512bit registers
         # (which in practice depends on the EVEX prefix to encode) as well
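Again for illustration only, not part of the patch: one of the newly
supported insns exercised from C via its intrinsic, assuming a
sufficiently new gcc/clang (built with -mavxvnniint16) and either
capable hardware or SDE with one of the switches named above.

#include <immintrin.h>
#include <stdio.h>

int main(void)
{
    __m128i acc  = _mm_setzero_si128();
    __m128i ones = _mm_set1_epi16(1);

    /* vpdpwusd: each dword accumulator gains 1*1 + 1*1. */
    acc = _mm_dpwusd_epi32(acc, ones, ones);

    printf("%d\n", _mm_cvtsi128_si32(acc)); /* prints 2 */

    return 0;
}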