[Xen-devel] [PATCH v5 32/47] x86emul: support AVX512F move duplicate insns
Judging from insn prefixes, these are scalar insns, but their (memory)
operands are vector ones (with the exception of 128-bit VMOVDDUP). For
this, some adjustments to the disp8scale calculation code are needed.

No explicit test harness additions beyond the overrides are needed, as
the compiler already makes use of the insns.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v4: New.

--- a/tools/tests/x86_emulator/evex-disp8.c
+++ b/tools/tests/x86_emulator/evex-disp8.c
@@ -157,6 +157,8 @@ static const struct test avx512f_all[] =
     INSN(movntdq,     66,   0f, e7,    vl, d_nb, vl),
     INSN(movntdqa,    66, 0f38, 2a,    vl, d_nb, vl),
     INSN_PFP_NB(movnt,      0f, 2b),
+    INSN(movshdup,    f3,   0f, 16,    vl, d_nb, vl),
+    INSN(movsldup,    f3,   0f, 12,    vl, d_nb, vl),
     INSN_PFP_NB(movu,       0f, 10),
     INSN_PFP_NB(movu,       0f, 11),
     INSN_FP(mul,            0f, 59),
@@ -253,6 +255,7 @@ static const struct test avx512f_128[] =
     INSN(insertps,  66, 0f3a, 21, el,    d, el),
     INSN(mov,       66,   0f, 6e, el, dq64, el),
     INSN(mov,       66,   0f, 7e, el, dq64, el),
+    INSN(movddup,   f2,   0f, 12, el,    q, el),
 //       movhlps,   ,    0f, 12,        d
     INSN(movhpd,    66,   0f, 16, el,    q, vl),
     INSN(movhpd,    66,   0f, 17, el,    q, vl),
@@ -275,6 +278,7 @@ static const struct test avx512f_no128[]
     INSN(extracti32x4, 66, 0f3a, 39, el_4,    d, vl),
     INSN(insertf32x4,  66, 0f3a, 18, el_4,    d, vl),
     INSN(inserti32x4,  66, 0f3a, 38, el_4,    d, vl),
+    INSN(movddup,      f2,   0f, 12,   vl, q_nb, vl),
     INSN(shuff32x4,    66, 0f3a, 23,   vl,    d, vl),
     INSN(shuff64x2,    66, 0f3a, 23,   vl,    q, vl),
     INSN(shufi32x4,    66, 0f3a, 43,   vl,    d, vl),
--- a/tools/tests/x86_emulator/simd.h
+++ b/tools/tests/x86_emulator/simd.h
@@ -326,8 +326,11 @@ REN(pandn, , d);
 REN(por, , d);
 REN(pxor, , d);
 # endif
+OVR(movddup);
 OVR(movntdq);
 OVR(movntdqa);
+OVR(movshdup);
+OVR(movsldup);
 OVR(pmovsxbd);
 OVR(pmovsxbq);
 OVR(pmovsxdq);
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -3043,6 +3043,15 @@ x86_decode(
 
     switch ( b )
     {
+    case 0x12: /* vmovsldup / vmovddup */
+        if ( evex.pfx == vex_f2 )
+            disp8scale = evex.lr ? 4 + evex.lr : 3;
+        /* fall through */
+    case 0x16: /* vmovshdup */
+        if ( evex.pfx == vex_f3 )
+            disp8scale = 4 + evex.lr;
+        break;
+
     case 0x20: /* mov cr,reg */
     case 0x21: /* mov dr,reg */
     case 0x22: /* mov reg,cr */
@@ -6043,6 +6052,20 @@ x86_emulate(
         host_and_vcpu_must_have(sse3);
         goto simd_0f_xmm;
 
+    case X86EMUL_OPC_EVEX_F3(0x0f, 0x12): /* vmovsldup [xyz]mm/mem,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_F2(0x0f, 0x12): /* vmovddup [xyz]mm/mem,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_F3(0x0f, 0x16): /* vmovshdup [xyz]mm/mem,[xyz]mm{k} */
+        generate_exception_if((evex.br ||
+                               evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK)),
+                              EXC_UD);
+        host_and_vcpu_must_have(avx512f);
+        avx512_vlen_check(false);
+        d |= TwoOp;
+        op_bytes = !(evex.pfx & VEX_PREFIX_DOUBLE_MASK) || evex.lr
+                   ? 16 << evex.lr : 8;
+        fault_suppression = false;
+        goto simd_zmm;
+
     CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x14): /* vunpcklp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
     CASE_SIMD_PACKED_FP(_EVEX, 0x0f, 0x15): /* vunpckhp{s,d} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
         generate_exception_if(evex.w != (evex.pfx & VEX_PREFIX_DOUBLE_MASK),
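As background for the disp8scale values chosen in the decode hunk: with
EVEX encoding, an 8-bit displacement is implicitly scaled by the memory
operand size N (the AVX512 "compressed displacement", Disp8*N), and
disp8scale holds log2(N). Below is a minimal standalone sketch of that
computation for these three insns. It is purely illustrative, not part
of the patch; the helper name and parameters (lr for EVEX.L'L, ddup to
distinguish the F2-prefixed vmovddup) are made up for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper, mirroring what the x86_decode() hunk stores in
 * disp8scale: lr is EVEX.L'L (0/1/2 for 128/256/512-bit vectors), the
 * return value is log2(N).
 */
static unsigned int mdup_disp8scale(unsigned int lr, bool ddup)
{
    /*
     * 128-bit vmovddup reads just one quadword (8 bytes); every other
     * case reads the full vector, i.e. 16 << L'L bytes.
     */
    return ddup && !lr ? 3 : 4 + lr;
}

int main(void)
{
    int8_t disp8 = -2; /* encoded 8-bit displacement */

    /* 512-bit vmovshdup: effective displacement is -2 * 64 = -128. */
    printf("%d\n", disp8 * (1 << mdup_disp8scale(2, false)));
    /* 128-bit vmovddup: effective displacement is -2 * 8 = -16. */
    printf("%d\n", disp8 * (1 << mdup_disp8scale(0, true)));
    return 0;
}

The op_bytes computation in the x86_emulate() hunk follows the same
pattern: 8 bytes only for 128-bit vmovddup, 16 << evex.lr otherwise.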