[Xen-devel] [PATCH v10 06/13] x86emul: support AVX512_VNNI insns
Along the lines of the 4FMAPS case, convert the 4VNNIW-based table entries
to a decoder adjustment. Because of the current sharing of table entries
between different (implied) opcode prefixes and with the same major opcodes
being used for vp4dpwssd{,s}, which have a different memory operand size and
different Disp8 scaling, the pre-existing table entries get converted to a
decoder override. The table entries will now represent the insns here, in
line with other table entries preferably representing the prefix-66 insns.

As in a few cases before, since the insns here and in particular their
memory access patterns follow the usual scheme, I didn't think it was
necessary to add a contrived test specifically for them, beyond the Disp8
scaling one.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v9: Re-base. Explain need for decoder special case.
v8: Re-base.
v7: New.

--- a/tools/tests/x86_emulator/evex-disp8.c
+++ b/tools/tests/x86_emulator/evex-disp8.c
@@ -580,6 +580,13 @@ static const struct test avx512_vbmi2_al
     INSN(pshrdw, 66, 0f3a, 72, vl, w, vl),
 };
 
+static const struct test avx512_vnni_all[] = {
+    INSN(pdpbusd,  66, 0f38, 50, vl, d, vl),
+    INSN(pdpbusds, 66, 0f38, 51, vl, d, vl),
+    INSN(pdpwssd,  66, 0f38, 52, vl, d, vl),
+    INSN(pdpwssds, 66, 0f38, 53, vl, d, vl),
+};
+
 static const struct test avx512_vpopcntdq_all[] = {
     INSN(popcnt, 66, 0f38, 55, vl, dq, vl)
 };
@@ -959,5 +966,6 @@ void evex_disp8_test(void *instr, struct
     RUN(avx512_ifma, all);
     RUN(avx512_vbmi, all);
     RUN(avx512_vbmi2, all);
+    RUN(avx512_vnni, all);
     RUN(avx512_vpopcntdq, all);
 }
--- a/tools/tests/x86_emulator/x86-emulate.h
+++ b/tools/tests/x86_emulator/x86-emulate.h
@@ -144,6 +144,7 @@ static inline bool xcr0_mask(uint64_t ma
 #define cpu_has_avx512vl      (cp.feat.avx512vl && xcr0_mask(0xe6))
 #define cpu_has_avx512_vbmi   (cp.feat.avx512_vbmi && xcr0_mask(0xe6))
 #define cpu_has_avx512_vbmi2  (cp.feat.avx512_vbmi2 && xcr0_mask(0xe6))
+#define cpu_has_avx512_vnni   (cp.feat.avx512_vnni && xcr0_mask(0xe6))
 #define cpu_has_avx512_bitalg (cp.feat.avx512_bitalg && xcr0_mask(0xe6))
 #define cpu_has_avx512_vpopcntdq (cp.feat.avx512_vpopcntdq && xcr0_mask(0xe6))
 #define cpu_has_avx512_4vnniw (cp.feat.avx512_4vnniw && xcr0_mask(0xe6))
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -479,7 +479,7 @@ static const struct ext0f38_table {
     [0x4d] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
     [0x4e] = { .simd_size = simd_packed_fp, .two_op = 1, .d8s = d8s_vl },
     [0x4f] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
-    [0x52 ... 0x53] = { .simd_size = simd_128, .d8s = 4 },
+    [0x50 ... 0x53] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
     [0x54 ... 0x55] = { .simd_size = simd_packed_int, .two_op = 1, .d8s = d8s_vl },
     [0x58] = { .simd_size = simd_other, .two_op = 1, .d8s = 2 },
     [0x59] = { .simd_size = simd_other, .two_op = 1, .d8s = 3 },
@@ -1890,6 +1890,7 @@ in_protmode(
 #define vcpu_has_avx512vl()    (ctxt->cpuid->feat.avx512vl)
 #define vcpu_has_avx512_vbmi() (ctxt->cpuid->feat.avx512_vbmi)
 #define vcpu_has_avx512_vbmi2() (ctxt->cpuid->feat.avx512_vbmi2)
+#define vcpu_has_avx512_vnni() (ctxt->cpuid->feat.avx512_vnni)
 #define vcpu_has_avx512_bitalg() (ctxt->cpuid->feat.avx512_bitalg)
 #define vcpu_has_avx512_vpopcntdq() (ctxt->cpuid->feat.avx512_vpopcntdq)
 #define vcpu_has_rdpid()       (ctxt->cpuid->feat.rdpid)
@@ -3179,6 +3180,8 @@ x86_decode(
         switch ( b )
         {
+            /* vp4dpwssd{,s} need special casing */
+        case 0x52: case 0x53:
             /* v4f{,n}madd{p,s}s need special casing */
         case 0x9a: case 0x9b: case 0xaa: case 0xab:
             if ( evex.pfx == vex_f2 )
@@ -9394,6 +9397,14 @@ x86_emulate(
         avx512_vlen_check(true);
         goto simd_zmm;
 
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x50): /* vpdpbusd [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x51): /* vpdpbusds [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x52): /* vpdpwssd [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x53): /* vpdpwssds [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+        host_and_vcpu_must_have(avx512_vnni);
+        generate_exception_if(evex.w, EXC_UD);
+        goto avx512f_no_sae;
+
     case X86EMUL_OPC_EVEX_F2(0x0f38, 0x9a): /* v4fmaddps m128,zmm+3,zmm{k} */
     case X86EMUL_OPC_EVEX_F2(0x0f38, 0xaa): /* v4fnmaddps m128,zmm+3,zmm{k} */
         host_and_vcpu_must_have(avx512_4fmaps);
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -111,6 +111,7 @@
 /* CPUID level 0x00000007:0.ecx */
 #define cpu_has_avx512_vbmi     boot_cpu_has(X86_FEATURE_AVX512_VBMI)
 #define cpu_has_avx512_vbmi2    boot_cpu_has(X86_FEATURE_AVX512_VBMI2)
+#define cpu_has_avx512_vnni     boot_cpu_has(X86_FEATURE_AVX512_VNNI)
 #define cpu_has_avx512_bitalg   boot_cpu_has(X86_FEATURE_AVX512_BITALG)
 #define cpu_has_avx512_vpopcntdq boot_cpu_has(X86_FEATURE_AVX512_VPOPCNTDQ)
 #define cpu_has_rdpid           boot_cpu_has(X86_FEATURE_RDPID)
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -229,6 +229,7 @@ XEN_CPUFEATURE(UMIP, 6*32+ 2) /
 XEN_CPUFEATURE(PKU,           6*32+ 3) /*H Protection Keys for Userspace */
 XEN_CPUFEATURE(OSPKE,         6*32+ 4) /*! OS Protection Keys Enable */
 XEN_CPUFEATURE(AVX512_VBMI2,  6*32+ 6) /*A Additional AVX-512 Vector Byte Manipulation Instrs */
+XEN_CPUFEATURE(AVX512_VNNI,   6*32+11) /*A Vector Neural Network Instrs */
 XEN_CPUFEATURE(AVX512_BITALG, 6*32+12) /*A Support for VPOPCNT[B,W] and VPSHUFBITQMB */
 XEN_CPUFEATURE(AVX512_VPOPCNTDQ, 6*32+14) /*A POPCNT for vectors of DW/QW */
 XEN_CPUFEATURE(RDPID,         6*32+22) /*A RDPID instruction */
--- a/xen/tools/gen-cpuid.py
+++ b/xen/tools/gen-cpuid.py
@@ -263,7 +263,7 @@ def crunch_numbers(state):
         # AVX512 features are built on top of AVX512F
         AVX512F: [AVX512DQ, AVX512_IFMA, AVX512PF, AVX512ER, AVX512CD,
                   AVX512BW, AVX512VL, AVX512_4VNNIW, AVX512_4FMAPS,
-                  AVX512_VPOPCNTDQ],
+                  AVX512_VNNI, AVX512_VPOPCNTDQ],
 
         # AVX512 extensions acting on vectors of bytes/words are made
         # dependents of AVX512BW (as to requiring wider than 16-bit mask

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
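For reference only, not part of the patch: each of the four insns performs a
per-dword dot-product accumulation. Below is a minimal scalar C sketch of the
per-lane behaviour of vpdpbusd and vpdpwssd, written from the architectural
description; the helper names are made up for this sketch, and the
non-saturating forms are shown simply wrapping modulo 2^32.

#include <stdint.h>

/*
 * vpdpbusd: multiply four unsigned bytes of src1 with the corresponding
 * four signed bytes of src2, sum the four products, and accumulate the sum
 * into the destination dword (vpdpbusds additionally saturates the result).
 */
static uint32_t dpbusd_lane(uint32_t acc, uint32_t src1, uint32_t src2)
{
    int32_t sum = 0;

    for ( unsigned int i = 0; i < 4; ++i )
    {
        uint8_t u = src1 >> (8 * i);
        int8_t  s = src2 >> (8 * i);

        sum += (int32_t)u * s;
    }

    return acc + (uint32_t)sum;
}

/*
 * vpdpwssd: same scheme with two signed word pairs per dword lane
 * (vpdpwssds saturates).  A 64-bit intermediate avoids overflow in the
 * -0x8000 * -0x8000 corner case.
 */
static uint32_t dpwssd_lane(uint32_t acc, uint32_t src1, uint32_t src2)
{
    int64_t sum = 0;

    for ( unsigned int i = 0; i < 2; ++i )
    {
        int16_t a = src1 >> (16 * i);
        int16_t b = src2 >> (16 * i);

        sum += (int64_t)a * b;
    }

    return acc + (uint32_t)sum;
}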
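Also only as an illustration of the Disp8 scaling the test harness checks
(the d8s_vl / "vl" columns above): with EVEX encodings an 8-bit displacement
is implicitly scaled before use; for full-vector memory operands the scale is
the vector length in bytes, or the element size when embedded broadcast is
active. A hypothetical helper, not taken from the Xen sources, showing the
resulting byte offset:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative only: byte offset denoted by an EVEX disp8 for a full-vector
 * memory operand.  vl_bytes is 16/32/64 for xmm/ymm/zmm; elem_bytes is 4 for
 * the dword-element VNNI insns; bcst is set when embedded broadcast (EVEX.b
 * with a memory operand) is in use.
 */
static int32_t evex_disp8_offset(int8_t disp8, unsigned int vl_bytes,
                                 unsigned int elem_bytes, bool bcst)
{
    return disp8 * (int32_t)(bcst ? elem_bytes : vl_bytes);
}

For example, an encoded disp8 of 1 on a zmm form without broadcast addresses
base + 64, which is the kind of relationship the evex-disp8.c table entries
added above describe.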