[Xen-devel] [PATCH v9 03/23] x86emul: prepare for AVX512F S/G insns
They require getting modrm_reg and sib_index set correctly in the EVEX
case, to account for the high 16 [XYZ]MM registers when used as
addressing index register. Extend the adjustments to modrm_rm as well,
such that x86_insn_modrm() would correctly report register numbers
(this was a latent issue only as we don't currently have callers of
that function which would care about an EVEX case).

The adjustment in turn requires dropping the assertion from
decode_gpr() as well as re-introducing the explicit masking, as we now
need to actively mask off the high bit when a GPR is meant.

_decode_gpr() invocations also need slight adjustments, when invoked in
generic code ahead of the main switch(). All other uses of modrm_reg
and modrm_rm already get suitably masked where necessary.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v9: New, split from main gather patch.

--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -3022,7 +3022,8 @@ x86_decode(
         d &= ~ModRM;
 #undef ModRM /* Only its aliases are valid to use from here on. */
-        modrm_reg = ((rex_prefix & 4) << 1) | ((modrm & 0x38) >> 3);
+        modrm_reg = ((rex_prefix & 4) << 1) | ((modrm & 0x38) >> 3) |
+                    ((evex_encoded() && !evex.R) << 4);
         modrm_rm  = modrm & 0x07;
 
         /*
@@ -3192,7 +3193,8 @@ x86_decode(
         if ( modrm_mod == 3 )
         {
             generate_exception_if(d & vSIB, EXC_UD);
-            modrm_rm |= (rex_prefix & 1) << 3;
+            modrm_rm |= ((rex_prefix & 1) << 3) |
+                        (evex_encoded() && !evex.x) << 4;
             ea.type = OP_REG;
         }
         else if ( ad_bytes == 2 )
@@ -3257,7 +3259,10 @@ x86_decode(
                 state->sib_index = ((sib >> 3) & 7) | ((rex_prefix << 2) & 8);
                 state->sib_scale = (sib >> 6) & 3;
-                if ( state->sib_index != 4 && !(d & vSIB) )
+                if ( unlikely(d & vSIB) )
+                    state->sib_index |= (mode_64bit() && evex_encoded() &&
+                                         !evex.RX) << 4;
+                else if ( state->sib_index != 4 )
                 {
                     ea.mem.off = *decode_gpr(state->regs, state->sib_index);
                     ea.mem.off <<= state->sib_scale;
@@ -3560,7 +3565,7 @@ x86_emulate(
     generate_exception_if(state->not_64bit && mode_64bit(), EXC_UD);
 
     if ( ea.type == OP_REG )
-        ea.reg = _decode_gpr(&_regs, modrm_rm, (d & ByteOp) && !rex_prefix);
+        ea.reg = _decode_gpr(&_regs, modrm_rm, (d & ByteOp) && !rex_prefix && !vex.opcx);
 
     memset(mmvalp, 0xaa /* arbitrary */, sizeof(*mmvalp));
@@ -3574,7 +3579,7 @@ x86_emulate(
         src.type = OP_REG;
         if ( d & ByteOp )
         {
-            src.reg = _decode_gpr(&_regs, modrm_reg, !rex_prefix);
+            src.reg = _decode_gpr(&_regs, modrm_reg, !rex_prefix && !vex.opcx);
             src.val = *(uint8_t *)src.reg;
             src.bytes = 1;
         }
@@ -3681,7 +3686,7 @@ x86_emulate(
         dst.type = OP_REG;
         if ( d & ByteOp )
         {
-            dst.reg = _decode_gpr(&_regs, modrm_reg, !rex_prefix);
+            dst.reg = _decode_gpr(&_regs, modrm_reg, !rex_prefix && !vex.opcx);
             dst.val = *(uint8_t *)dst.reg;
             dst.bytes = 1;
         }
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -662,8 +662,6 @@ static inline unsigned long *decode_gpr(
     BUILD_BUG_ON(ARRAY_SIZE(cpu_user_regs_gpr_offsets) &
                  (ARRAY_SIZE(cpu_user_regs_gpr_offsets) - 1));
 
-    ASSERT(modrm < ARRAY_SIZE(cpu_user_regs_gpr_offsets));
-
     /* Note that this also acts as array_access_nospec() stand-in. */
     modrm &= ARRAY_SIZE(cpu_user_regs_gpr_offsets) - 1;
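
For illustration, a minimal standalone C sketch (not part of the patch; all
identifiers below are hypothetical stand-ins, not the emulator's own fields)
of what the hunks above implement: the 3-bit ModRM reg field is extended to
bit 3 by the REX/VEX/EVEX R bit and to bit 4 by the inverted EVEX R' bit, and
a GPR consumer then has to mask the result back down to 4 bits, which is why
decode_gpr() trades its assertion for explicit masking.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical, already-extracted prefix state.  Each field holds the
 * logical extension value, i.e. 1 means "add 8" resp. "add 16" (the EVEX
 * R' bit is stored inverted in the encoding, hence the !evex.R above).
 */
struct prefix_state {
    unsigned int rex_r:1;        /* REX.R / VEX.R / EVEX.R (logical)    */
    unsigned int evex_r_hi:1;    /* EVEX R' (logical, i.e. un-inverted) */
    unsigned int evex_encoded:1; /* insn used an EVEX prefix            */
};

/* Build the full 5-bit register number from ModRM.reg. */
static unsigned int modrm_reg_number(uint8_t modrm, const struct prefix_state *p)
{
    unsigned int reg = (modrm >> 3) & 7;     /* bits 0..2: ModRM.reg     */

    reg |= p->rex_r << 3;                    /* bit 3: REX/VEX/EVEX.R    */
    if ( p->evex_encoded )
        reg |= p->evex_r_hi << 4;            /* bit 4: high-16 registers */
    return reg;
}

/*
 * A GPR file only has 16 entries, so bit 4 must be masked off again when a
 * GPR is meant (the equivalent of the masking re-introduced in decode_gpr()).
 */
static unsigned int gpr_number(unsigned int reg)
{
    return reg & 0xf;
}

int main(void)
{
    struct prefix_state p = { .rex_r = 1, .evex_r_hi = 1, .evex_encoded = 1 };
    unsigned int reg = modrm_reg_number(0x38 /* ModRM.reg = 7 */, &p);

    /* Prints 31 (e.g. %zmm31) and 15 (%r15 when a GPR is meant). */
    printf("vector register %u, masked GPR index %u\n", reg, gpr_number(reg));
    return 0;
}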