
Re: [Xen-devel] [PATCH v3 02/18] x86emul: support most memory accessing MMX/SSE{,2,3} insns



On 15/02/17 11:07, Jan Beulich wrote:
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -45,6 +45,8 @@
>  #define ModRM       (1<<6)
>  /* Destination is only written; never read. */
>  #define Mov         (1<<7)
> +/* VEX/EVEX (SIMD only): 2nd source operand unused (must be all ones) */
> +#define TwoOp       Mov

Is this safe?  It looks overloaded to me.  The Mov behaviour is still
applicable even with TwoOp VEX/EVEX encodings.
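
If there's room to widen the descriptor, a separate flag bit would avoid
the aliasing altogether, e.g. (untested sketch, and assuming opcode_desc_t
and the places it gets copied into can grow beyond 8 bits):

    /* VEX/EVEX (SIMD only): 2nd source operand unused (must be all ones). */
    #define TwoOp       (1<<8)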

>  /* All operands are implicit in the opcode. */
>  #define ImplicitOps (DstImplicit|SrcImplicit)
>  
> @@ -180,8 +182,44 @@ static const opcode_desc_t opcode_table[
>      ImplicitOps, ImplicitOps, ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM
>  };
>  
> +enum simd_opsize {
> +    simd_none,

Please can we have newlines here,

> +    /*
> +     * Ordinary packed integers:
> +     * - 64 bits without prefix 66 (MMX)
> +     * - 128 bits with prefix 66 (SSEn)
> +     * - 128/256 bits depending on VEX.L (AVX)
> +     */

and here, etc., to help identify which comment is attached to which enum.
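
E.g., for the first two entries:

    simd_none,

    /*
     * Ordinary packed integers:
     * - 64 bits without prefix 66 (MMX)
     * - 128 bits with prefix 66 (SSEn)
     * - 128/256 bits depending on VEX.L (AVX)
     */
    simd_packed_int,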

> +    simd_packed_int,
> +    /*
> +     * Ordinary packed/scalar floating point:
> +     * - 128 bits without prefix or with prefix 66 (SSEn)
> +     * - 128/256 bits depending on VEX.L (AVX)
> +     * - 32 bits with prefix F3 (scalar single)
> +     * - 64 bits with prefix F2 (scalar double)
> +     */
> +    simd_any_fp,
> +    /*
> +     * Packed floating point:
> +     * - 128 bits without prefix or with prefix 66 (SSEn)
> +     * - 128/256 bits depending on VEX.L (AVX)
> +     */
> +    simd_packed_fp,
> +    /*
> +     * Single precision packed/scalar floating point:
> +     * - 128 bits without prefix (SSEn)
> +     * - 128/256 bits depending on VEX.L, no prefix (AVX)
> +     * - 32 bits with prefix F3 (scalar)
> +     */
> +    simd_single_fp,
> +    /* Operand size encoded in non-standard way. */
> +    simd_other

Trailing comma, please.

> +};
> +typedef uint8_t simd_opsize_t;
> +
>  static const struct {
>      opcode_desc_t desc;
> +    simd_opsize_t size;
>  } twobyte_table[256] = {
>      [0x00] = { ModRM },
>      [0x01] = { ImplicitOps|ModRM },
> @@ -196,22 +234,41 @@ static const struct {
>      [0x0d] = { ImplicitOps|ModRM },
>      [0x0e] = { ImplicitOps },
>      [0x0f] = { ModRM|SrcImmByte },
> -    [0x10 ... 0x1f] = { ImplicitOps|ModRM },
> +    [0x10] = { DstImplicit|SrcMem|ModRM|Mov, simd_any_fp },
> +    [0x11] = { DstMem|SrcImplicit|ModRM|Mov, simd_any_fp },
> +    [0x12 ... 0x13] = { ImplicitOps|ModRM },
> +    [0x14 ... 0x15] = { DstImplicit|SrcMem|ModRM, simd_packed_fp },
> +    [0x16 ... 0x1f] = { ImplicitOps|ModRM },
>      [0x20 ... 0x21] = { DstMem|SrcImplicit|ModRM },
>      [0x22 ... 0x23] = { DstImplicit|SrcMem|ModRM },
> -    [0x28 ... 0x2f] = { ImplicitOps|ModRM },
> +    [0x28] = { DstImplicit|SrcMem|ModRM|Mov, simd_packed_fp },
> +    [0x29] = { DstMem|SrcImplicit|ModRM|Mov, simd_packed_fp },
> +    [0x2a] = { ImplicitOps|ModRM },
> +    [0x2b] = { DstMem|SrcImplicit|ModRM|Mov, simd_any_fp },
> +    [0x2c ... 0x2f] = { ImplicitOps|ModRM },
>      [0x30 ... 0x35] = { ImplicitOps },
>      [0x37] = { ImplicitOps },
>      [0x38] = { DstReg|SrcMem|ModRM },
>      [0x3a] = { DstReg|SrcImmByte|ModRM },
>      [0x40 ... 0x4f] = { DstReg|SrcMem|ModRM|Mov },
> -    [0x50 ... 0x6e] = { ModRM },
> -    [0x6f] = { ImplicitOps|ModRM },
> -    [0x70 ... 0x73] = { SrcImmByte|ModRM },
> -    [0x74 ... 0x76] = { ModRM },
> -    [0x77] = { ImplicitOps },
> +    [0x50] = { ModRM },
> +    [0x51] = { DstImplicit|SrcMem|ModRM|TwoOp, simd_any_fp },
> +    [0x52 ... 0x53] = { DstImplicit|SrcMem|ModRM|TwoOp, simd_single_fp },

RCPPS/RCPSS all have 3 operands.  Why is TwoOp used here?

> +    [0x54 ... 0x57] = { DstImplicit|SrcMem|ModRM, simd_packed_fp },
> +    [0x58 ... 0x59] = { DstImplicit|SrcMem|ModRM, simd_any_fp },
> +    [0x5a ... 0x5b] = { ModRM },
> +    [0x5c ... 0x5f] = { DstImplicit|SrcMem|ModRM, simd_any_fp },
> +    [0x60 ... 0x62] = { DstImplicit|SrcMem|ModRM, simd_other },
> +    [0x63 ... 0x67] = { DstImplicit|SrcMem|ModRM, simd_packed_int },
> +    [0x68 ... 0x6a] = { DstImplicit|SrcMem|ModRM, simd_other },
> +    [0x6b ... 0x6d] = { DstImplicit|SrcMem|ModRM, simd_packed_int },
> +    [0x6e ... 0x6f] = { ImplicitOps|ModRM },
> +    [0x70] = { SrcImmByte|ModRM|TwoOp, simd_other },
> +    [0x71 ... 0x73] = { SrcImmByte|ModRM },
> +    [0x74 ... 0x76] = { DstImplicit|SrcMem|ModRM, simd_packed_int },
> +    [0x77] = { DstImplicit|SrcNone },
>      [0x78 ... 0x79] = { ModRM },
> -    [0x7c ... 0x7d] = { ModRM },
> +    [0x7c ... 0x7d] = { DstImplicit|SrcMem|ModRM, simd_other },
>      [0x7e ... 0x7f] = { ImplicitOps|ModRM },
>      [0x80 ... 0x8f] = { DstImplicit|SrcImm },
>      [0x90 ... 0x9f] = { ByteOp|DstMem|SrcNone|ModRM|Mov },
> @@ -2601,13 +2692,53 @@ x86_decode(
>          ea.mem.off = truncate_ea(ea.mem.off);
>      }
>  
> -    /*
> -     * When prefix 66 has a meaning different from operand-size override,
> -     * operand size defaults to 4 and can't be overridden to 2.
> -     */
> -    if ( op_bytes == 2 &&
> -         (ctxt->opcode & X86EMUL_OPC_PFX_MASK) == X86EMUL_OPC_66(0, 0) )
> -        op_bytes = 4;
> +    switch ( state->simd_size )
> +    {
> +    case simd_none:
> +        /*
> +         * When prefix 66 has a meaning different from operand-size override,
> +         * operand size defaults to 4 and can't be overridden to 2.
> +         */
> +        if ( op_bytes == 2 &&
> +             (ctxt->opcode & X86EMUL_OPC_PFX_MASK) == X86EMUL_OPC_66(0, 0) )
> +            op_bytes = 4;
> +        break;
> +
> +    case simd_packed_int:
> +        switch ( vex.pfx )
> +        {
> +        case vex_none: op_bytes = 8;           break;
> +        case vex_66:   op_bytes = 16 << vex.l; break;
> +        default:       op_bytes = 0;           break;
> +        }
> +        break;
> +
> +    case simd_single_fp:
> +        if ( vex.pfx & VEX_PREFIX_DOUBLE_MASK )

This logic would be far easier to follow by using vex.pfx == vex_66 ||
vex.pfx == vex_f2.
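
i.e.:

    case simd_single_fp:
        if ( vex.pfx == vex_66 || vex.pfx == vex_f2 )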

> +        {
> +            op_bytes = 0;
> +            break;
> +    case simd_packed_fp:
> +            if ( vex.pfx & VEX_PREFIX_SCALAR_MASK )

Similarly here, vex_none || vex_f3

Having said that, taking VSHUFPS (0xc6) as an example of simd_packed_fp:
the instruction is defined for vex_none and vex_66, both of which have
op_bytes of 16 when not VEX encoded.
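
Following the simd_packed_int pattern above, I'd have expected something
along these lines (untested sketch only):

    case simd_packed_fp:
        switch ( vex.pfx )
        {
        case vex_none:
        case vex_66:   op_bytes = 16 << vex.l; break;
        default:       op_bytes = 0;           break;
        }
        break;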

> @@ -5020,116 +5159,117 @@ x86_emulate(
>      case X86EMUL_OPC(0x0f, 0x19) ... X86EMUL_OPC(0x0f, 0x1f): /* nop */
>          break;
>  
> -    case X86EMUL_OPC(0x0f, 0x2b):        /* movntps xmm,m128 */
> -    case X86EMUL_OPC_VEX(0x0f, 0x2b):    /* vmovntps xmm,m128 */
> -                                         /* vmovntps ymm,m256 */
> -    case X86EMUL_OPC_66(0x0f, 0x2b):     /* movntpd xmm,m128 */
> -    case X86EMUL_OPC_VEX_66(0x0f, 0x2b): /* vmovntpd xmm,m128 */
> -                                         /* vmovntpd ymm,m256 */
> -        fail_if(ea.type != OP_MEM);
> -        /* fall through */
> -    case X86EMUL_OPC(0x0f, 0x28):        /* movaps xmm/m128,xmm */
> -    case X86EMUL_OPC_VEX(0x0f, 0x28):    /* vmovaps xmm/m128,xmm */
> -                                         /* vmovaps ymm/m256,ymm */
> -    case X86EMUL_OPC_66(0x0f, 0x28):     /* movapd xmm/m128,xmm */
> -    case X86EMUL_OPC_VEX_66(0x0f, 0x28): /* vmovapd xmm/m128,xmm */
> -                                         /* vmovapd ymm/m256,ymm */
> -    case X86EMUL_OPC(0x0f, 0x29):        /* movaps xmm,xmm/m128 */
> -    case X86EMUL_OPC_VEX(0x0f, 0x29):    /* vmovaps xmm,xmm/m128 */
> -                                         /* vmovaps ymm,ymm/m256 */
> -    case X86EMUL_OPC_66(0x0f, 0x29):     /* movapd xmm,xmm/m128 */
> -    case X86EMUL_OPC_VEX_66(0x0f, 0x29): /* vmovapd xmm,xmm/m128 */
> -                                         /* vmovapd ymm,ymm/m256 */
> -    case X86EMUL_OPC(0x0f, 0x10):        /* movups xmm/m128,xmm */
> -    case X86EMUL_OPC_VEX(0x0f, 0x10):    /* vmovups xmm/m128,xmm */
> -                                         /* vmovups ymm/m256,ymm */
> -    case X86EMUL_OPC_66(0x0f, 0x10):     /* movupd xmm/m128,xmm */
> -    case X86EMUL_OPC_VEX_66(0x0f, 0x10): /* vmovupd xmm/m128,xmm */
> -                                         /* vmovupd ymm/m256,ymm */
> -    case X86EMUL_OPC_F3(0x0f, 0x10):     /* movss xmm/m32,xmm */
> -    case X86EMUL_OPC_VEX_F3(0x0f, 0x10): /* vmovss xmm/m32,xmm */
> -    case X86EMUL_OPC_F2(0x0f, 0x10):     /* movsd xmm/m64,xmm */
> -    case X86EMUL_OPC_VEX_F2(0x0f, 0x10): /* vmovsd xmm/m64,xmm */
> -    case X86EMUL_OPC(0x0f, 0x11):        /* movups xmm,xmm/m128 */
> -    case X86EMUL_OPC_VEX(0x0f, 0x11):    /* vmovups xmm,xmm/m128 */
> -                                         /* vmovups ymm,ymm/m256 */
> -    case X86EMUL_OPC_66(0x0f, 0x11):     /* movupd xmm,xmm/m128 */
> -    case X86EMUL_OPC_VEX_66(0x0f, 0x11): /* vmovupd xmm,xmm/m128 */
> -                                         /* vmovupd ymm,ymm/m256 */
> -    case X86EMUL_OPC_F3(0x0f, 0x11):     /* movss xmm,xmm/m32 */
> -    case X86EMUL_OPC_VEX_F3(0x0f, 0x11): /* vmovss xmm,xmm/m32 */
> -    case X86EMUL_OPC_F2(0x0f, 0x11):     /* movsd xmm,xmm/m64 */
> -    case X86EMUL_OPC_VEX_F2(0x0f, 0x11): /* vmovsd xmm,xmm/m64 */
> -    {
> -        uint8_t *buf = get_stub(stub);
> +#define CASE_SIMD_PACKED_INT(pfx, opc)       \
> +    case X86EMUL_OPC(pfx, opc):              \
> +    case X86EMUL_OPC_66(pfx, opc)
> +#define CASE_SIMD_SINGLE_FP(kind, pfx, opc)  \
> +    case X86EMUL_OPC##kind(pfx, opc):        \
> +    case X86EMUL_OPC##kind##_F3(pfx, opc)
> +#define CASE_SIMD_DOUBLE_FP(kind, pfx, opc)  \
> +    case X86EMUL_OPC##kind##_66(pfx, opc):   \
> +    case X86EMUL_OPC##kind##_F2(pfx, opc)
> +#define CASE_SIMD_ALL_FP(kind, pfx, opc)     \
> +    CASE_SIMD_SINGLE_FP(kind, pfx, opc):     \
> +    CASE_SIMD_DOUBLE_FP(kind, pfx, opc)
> +#define CASE_SIMD_PACKED_FP(kind, pfx, opc)  \
> +    case X86EMUL_OPC##kind(pfx, opc):        \
> +    case X86EMUL_OPC##kind##_66(pfx, opc)
> +#define CASE_SIMD_SCALAR_FP(kind, pfx, opc)  \
> +    case X86EMUL_OPC##kind##_F3(pfx, opc):   \
> +    case X86EMUL_OPC##kind##_F2(pfx, opc)
>  
> -        fic.insn_bytes = 5;
> -        buf[0] = 0x3e;
> -        buf[1] = 0x3e;
> -        buf[2] = 0x0f;
> -        buf[3] = b;
> -        buf[4] = modrm;
> -        buf[5] = 0xc3;
> +    CASE_SIMD_SCALAR_FP(, 0x0f, 0x2b):     /* movnts{s,d} xmm,mem */
> +        host_and_vcpu_must_have(sse4a);
> +        /* fall through */
> +    CASE_SIMD_PACKED_FP(, 0x0f, 0x2b):     /* movntp{s,d} xmm,m128 */
> +    CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x2b): /* vmovntp{s,d} {x,y}mm,mem */
> +        generate_exception_if(ea.type != OP_MEM, EXC_UD);
> +        sfence = true;

Why do we need to emit an sfence at this point?  The software hitting
this emulation is the entity which should be making sfence decisions.

> +        /* fall through */
> +    CASE_SIMD_ALL_FP(, 0x0f, 0x10):        /* mov{up,s}{s,d} xmm/mem,xmm */
> +    CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x10): /* vmovup{s,d} {x,y}mm/mem,{x,y}mm */
> +    CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x10): /* vmovs{s,d} mem,xmm */
> +                                           /* vmovs{s,d} xmm,xmm,xmm */
> +    CASE_SIMD_ALL_FP(, 0x0f, 0x11):        /* mov{up,s}{s,d} xmm,xmm/mem */
> +    CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x11): /* vmovup{s,d} {x,y}mm,{x,y}mm/mem */
> +    CASE_SIMD_SCALAR_FP(_VEX, 0x0f, 0x11): /* vmovs{s,d} xmm,mem */
> +                                           /* vmovs{s,d} xmm,xmm,xmm */
> +    CASE_SIMD_PACKED_FP(, 0x0f, 0x14):     /* unpcklp{s,d} xmm/m128,xmm */
> +    CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x14): /* vunpcklp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
> +    CASE_SIMD_PACKED_FP(, 0x0f, 0x15):     /* unpckhp{s,d} xmm/m128,xmm */
> +    CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x15): /* vunpckhp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
> +    CASE_SIMD_PACKED_FP(, 0x0f, 0x28):     /* movap{s,d} xmm/m128,xmm */
> +    CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x28): /* vmovap{s,d} {x,y}mm/mem,{x,y}mm */
> +    CASE_SIMD_PACKED_FP(, 0x0f, 0x29):     /* movap{s,d} xmm,xmm/m128 */
> +    CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x29): /* vmovap{s,d} {x,y}mm,{x,y}mm/mem */
> +    CASE_SIMD_ALL_FP(, 0x0f, 0x51):        /* sqrt{p,s}{s,d} xmm/mem,xmm */
> +    CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x51):    /* vsqrtp{s,d} {x,y}mm/mem,{x,y}mm */
> +                                           /* vsqrts{s,d} xmm/m32,xmm,xmm */
> +    CASE_SIMD_SINGLE_FP(, 0x0f, 0x52):     /* rsqrt{p,s}s xmm/mem,xmm */
> +    CASE_SIMD_SINGLE_FP(_VEX, 0x0f, 0x52): /* vrsqrtps {x,y}mm/mem,{x,y}mm */
> +                                           /* vrsqrtss xmm/m32,xmm,xmm */
> +    CASE_SIMD_SINGLE_FP(, 0x0f, 0x53):     /* rcp{p,s}s xmm/mem,xmm */
> +    CASE_SIMD_SINGLE_FP(_VEX, 0x0f, 0x53): /* vrcpps {x,y}mm/mem,{x,y}mm */
> +                                           /* vrcpss xmm/m32,xmm,xmm */
> +    CASE_SIMD_PACKED_FP(, 0x0f, 0x54):     /* andp{s,d} xmm/m128,xmm */
> +    CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x54): /* vandp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
> +    CASE_SIMD_PACKED_FP(, 0x0f, 0x55):     /* andnp{s,d} xmm/m128,xmm */
> +    CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x55): /* vandnp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
> +    CASE_SIMD_PACKED_FP(, 0x0f, 0x56):     /* orp{s,d} xmm/m128,xmm */
> +    CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x56): /* vorp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
> +    CASE_SIMD_PACKED_FP(, 0x0f, 0x57):     /* xorp{s,d} xmm/m128,xmm */
> +    CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x57): /* vxorp{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
> +    CASE_SIMD_ALL_FP(, 0x0f, 0x58):        /* add{p,s}{s,d} xmm/mem,xmm */
> +    CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x58):    /* vadd{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
> +    CASE_SIMD_ALL_FP(, 0x0f, 0x59):        /* mul{p,s}{s,d} xmm/mem,xmm */
> +    CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x59):    /* vmul{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
> +    CASE_SIMD_ALL_FP(, 0x0f, 0x5c):        /* sub{p,s}{s,d} xmm/mem,xmm */
> +    CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5c):    /* vsub{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
> +    CASE_SIMD_ALL_FP(, 0x0f, 0x5d):        /* min{p,s}{s,d} xmm/mem,xmm */
> +    CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5d):    /* vmin{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
> +    CASE_SIMD_ALL_FP(, 0x0f, 0x5e):        /* div{p,s}{s,d} xmm/mem,xmm */
> +    CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5e):    /* vdiv{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
> +    CASE_SIMD_ALL_FP(, 0x0f, 0x5f):        /* max{p,s}{s,d} xmm/mem,xmm */
> +    CASE_SIMD_ALL_FP(_VEX, 0x0f, 0x5f):    /* vmax{p,s}{s,d} {x,y}mm/mem,{x,y}mm,{x,y}mm */
>          if ( vex.opcx == vex_none )
>          {
>              if ( vex.pfx & VEX_PREFIX_DOUBLE_MASK )
> +            {
> +    simd_0f_sse2:
>                  vcpu_must_have(sse2);
> +            }
>              else
>                  vcpu_must_have(sse);
> -            ea.bytes = 16;
> -            SET_SSE_PREFIX(buf[0], vex.pfx);
> +    simd_0f_xmm:
>              get_fpu(X86EMUL_FPU_xmm, &fic);
>          }
>          else
>          {
> -            fail_if((vex.reg != 0xf) &&
> -                    ((ea.type == OP_MEM) ||
> -                     !(vex.pfx & VEX_PREFIX_SCALAR_MASK)));
> +            /* vmovs{s,d} to/from memory have only two operands. */
> +            if ( (b & ~1) == 0x10 && ea.type == OP_MEM )
> +                d |= TwoOp;
> +    simd_0f_avx:
>              host_and_vcpu_must_have(avx);
> +    simd_0f_ymm:
>              get_fpu(X86EMUL_FPU_ymm, &fic);
> -            ea.bytes = 16 << vex.l;
>          }
> -        if ( vex.pfx & VEX_PREFIX_SCALAR_MASK )
> -            ea.bytes = vex.pfx & VEX_PREFIX_DOUBLE_MASK ? 8 : 4;
> +    simd_0f_common:
> +    {
> +        uint8_t *buf = get_stub(stub);
> +
> +        buf[0] = 0x3e;
> +        buf[1] = 0x3e;
> +        buf[2] = 0x0f;
> +        buf[3] = b;
> +        buf[4] = modrm;
>          if ( ea.type == OP_MEM )
>          {
> -            uint32_t mxcsr = 0;
> -
> -            if ( b < 0x28 )
> -                mxcsr = MXCSR_MM;
> -            else if ( vcpu_has_misalignsse() )
> -                asm ( "stmxcsr %0" : "=m" (mxcsr) );
> -            generate_exception_if(!(mxcsr & MXCSR_MM) &&
> -                                  !is_aligned(ea.mem.seg, ea.mem.off, ea.bytes,
> -                                              ctxt, ops),
> -                                  EXC_GP, 0);
> -            if ( !(b & 1) )
> -                rc = ops->read(ea.mem.seg, ea.mem.off+0, mmvalp,
> -                               ea.bytes, ctxt);
> -            else
> -                fail_if(!ops->write); /* Check before running the stub. */
>              /* convert memory operand to (%rAX) */
>              rex_prefix &= ~REX_B;
>              vex.b = 1;
>              buf[4] &= 0x38;
>          }
> -        if ( !rc )
> -        {
> -           copy_REX_VEX(buf, rex_prefix, vex);
> -           asm volatile ( "call *%0" : : "r" (stub.func), "a" (mmvalp)
> -                                     : "memory" );
> -        }
> -        put_fpu(&fic);
> -        put_stub(stub);
> -        if ( !rc && (b & 1) && (ea.type == OP_MEM) )
> -        {
> -            ASSERT(ops->write); /* See the fail_if() above. */
> -            rc = ops->write(ea.mem.seg, ea.mem.off, mmvalp,
> -                            ea.bytes, ctxt);
> -        }
> -        if ( rc )
> -            goto done;
> -        dst.type = OP_NONE;
> +        fic.insn_bytes = 5;
>          break;
>      }
>  
> @@ -6457,22 +6917,6 @@ x86_insn_is_mem_write(const struct x86_e
>      case 0x6c: case 0x6d:                /* INS */
>      case 0xa4: case 0xa5:                /* MOVS */
>      case 0xaa: case 0xab:                /* STOS */
> -    case X86EMUL_OPC(0x0f, 0x11):        /* MOVUPS */
> -    case X86EMUL_OPC_VEX(0x0f, 0x11):    /* VMOVUPS */
> -    case X86EMUL_OPC_66(0x0f, 0x11):     /* MOVUPD */
> -    case X86EMUL_OPC_VEX_66(0x0f, 0x11): /* VMOVUPD */
> -    case X86EMUL_OPC_F3(0x0f, 0x11):     /* MOVSS */
> -    case X86EMUL_OPC_VEX_F3(0x0f, 0x11): /* VMOVSS */
> -    case X86EMUL_OPC_F2(0x0f, 0x11):     /* MOVSD */
> -    case X86EMUL_OPC_VEX_F2(0x0f, 0x11): /* VMOVSD */
> -    case X86EMUL_OPC(0x0f, 0x29):        /* MOVAPS */
> -    case X86EMUL_OPC_VEX(0x0f, 0x29):    /* VMOVAPS */
> -    case X86EMUL_OPC_66(0x0f, 0x29):     /* MOVAPD */
> -    case X86EMUL_OPC_VEX_66(0x0f, 0x29): /* VMOVAPD */
> -    case X86EMUL_OPC(0x0f, 0x2b):        /* MOVNTPS */
> -    case X86EMUL_OPC_VEX(0x0f, 0x2b):    /* VMOVNTPS */
> -    case X86EMUL_OPC_66(0x0f, 0x2b):     /* MOVNTPD */
> -    case X86EMUL_OPC_VEX_66(0x0f, 0x2b): /* VMOVNTPD */

Where have these gone?

~Andrew

>      case X86EMUL_OPC(0x0f, 0x7e):        /* MOVD/MOVQ */
>      case X86EMUL_OPC_66(0x0f, 0x7e):     /* MOVD/MOVQ */
>      case X86EMUL_OPC_VEX_66(0x0f, 0x7e): /* VMOVD/VMOVQ */

