[Xen-changelog] [xen master] x86/HVM: restrict permitted instructions during special purpose emulation
commit 3dd00f7b566b09c7c9fe4af3caf3085b0cd53aab
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Jan 11 13:40:49 2017 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Jan 11 13:40:49 2017 +0100

    x86/HVM: restrict permitted instructions during special purpose emulation

    Most invocations of the instruction emulator are for VM exits where the
    set of legitimate instructions (i.e. ones capable of causing the
    respective exit) is rather small. Restrict the permitted sets via a new
    callback, at once eliminating the abuse of handle_mmio() for non-MMIO
    operations.

    A seemingly unrelated comment adjustment is being done here to keep
    x86_emulate() in sync with x86_insn_is_mem_write() (in the context of
    which this was found to be wrong).

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
    Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c             |  19 +++-
 xen/arch/x86/hvm/hvm.c                 |  16 ++-
 xen/arch/x86/hvm/io.c                  |   4 +-
 xen/arch/x86/hvm/ioreq.c               |   2 +-
 xen/arch/x86/hvm/svm/emulate.c         |   2 +-
 xen/arch/x86/hvm/svm/svm.c             |  16 ++-
 xen/arch/x86/hvm/vmx/realmode.c        |   2 +-
 xen/arch/x86/hvm/vmx/vmx.c             |   2 +-
 xen/arch/x86/x86_emulate/x86_emulate.c | 182 ++++++++++++++++++++++++++++++++-
 xen/arch/x86/x86_emulate/x86_emulate.h |  12 +++
 xen/include/asm-x86/hvm/emulate.h      |  17 +++
 xen/include/asm-x86/hvm/io.h           |   1 -
 12 files changed, 261 insertions(+), 14 deletions(-)
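[Editorial note: in essence, the patch threads an optional post-decode predicate through to the emulator core: once an instruction has been decoded, an exit-specific filter gets a chance to reject it before any guest state is touched. A minimal standalone sketch of that control flow follows; the types, emulate_one() wrapper and is_hlt() predicate are illustrative stand-ins, not code from the patch.]

#include <stdbool.h>

/* Illustrative stand-ins for the emulator's result codes and decode state. */
enum { DEMO_OKAY, DEMO_UNHANDLEABLE };

struct insn_state {
    unsigned int opcode;
};

typedef bool validate_t(const struct insn_state *state);

/* Post-decode, pre-execute gate: a NULL hook permits every instruction. */
static int emulate_one(const struct insn_state *state, validate_t *validate)
{
    if ( validate && !validate(state) )
        return DEMO_UNHANDLEABLE; /* not a legitimate insn for this exit */

    /* ... actual emulation of *state would go here ... */
    return DEMO_OKAY;
}

/* Example filter: a (hypothetical) exit that only HLT can cause. */
static bool is_hlt(const struct insn_state *state)
{
    return state->opcode == 0xf4; /* HLT */
}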
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index ecbac4b..e22740f 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1039,6 +1039,17 @@ static int hvmemul_cmpxchg(
     return hvmemul_write(seg, offset, p_new, bytes, ctxt);
 }
 
+static int hvmemul_validate(
+    const struct x86_emulate_state *state,
+    struct x86_emulate_ctxt *ctxt)
+{
+    const struct hvm_emulate_ctxt *hvmemul_ctxt =
+        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+
+    return !hvmemul_ctxt->validate || hvmemul_ctxt->validate(state, ctxt)
+           ? X86EMUL_OKAY : X86EMUL_UNHANDLEABLE;
+}
+
 static int hvmemul_rep_ins(
     uint16_t src_port,
     enum x86_segment dst_seg,
@@ -1660,6 +1671,7 @@ static const struct x86_emulate_ops hvm_emulate_ops = {
     .insn_fetch    = hvmemul_insn_fetch,
     .write         = hvmemul_write,
     .cmpxchg       = hvmemul_cmpxchg,
+    .validate      = hvmemul_validate,
     .rep_ins       = hvmemul_rep_ins,
     .rep_outs      = hvmemul_rep_outs,
     .rep_movs      = hvmemul_rep_movs,
@@ -1805,7 +1817,8 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
     else
         ops = &hvm_ro_emulate_ops_mmio;
 
-    hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
+    hvm_emulate_init_once(&ctxt, x86_insn_is_mem_write,
+                          guest_cpu_user_regs());
     ctxt.ctxt.data = &mmio_ro_ctxt;
     rc = _hvm_emulate_one(&ctxt, ops);
     switch ( rc )
@@ -1830,7 +1843,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
     struct hvm_emulate_ctxt ctx = {{ 0 }};
     int rc;
 
-    hvm_emulate_init_once(&ctx, guest_cpu_user_regs());
+    hvm_emulate_init_once(&ctx, NULL, guest_cpu_user_regs());
 
     switch ( kind )
     {
@@ -1884,6 +1897,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
 
 void hvm_emulate_init_once(
     struct hvm_emulate_ctxt *hvmemul_ctxt,
+    hvm_emulate_validate_t *validate,
     struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
@@ -1894,6 +1908,7 @@ void hvm_emulate_init_once(
     hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
     hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
 
+    hvmemul_ctxt->validate = validate;
     hvmemul_ctxt->ctxt.regs = regs;
     hvmemul_ctxt->ctxt.vendor = curr->domain->arch.x86_vendor;
     hvmemul_ctxt->ctxt.force_writeback = true;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e11fba8..2ec0800 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3601,6 +3601,20 @@ gp_fault:
     return X86EMUL_EXCEPTION;
 }
 
+static bool is_cross_vendor(const struct x86_emulate_state *state,
+                            const struct x86_emulate_ctxt *ctxt)
+{
+    switch ( ctxt->opcode )
+    {
+    case X86EMUL_OPC(0x0f, 0x05): /* syscall */
+    case X86EMUL_OPC(0x0f, 0x34): /* sysenter */
+    case X86EMUL_OPC(0x0f, 0x35): /* sysexit */
+        return true;
+    }
+
+    return false;
+}
+
 void hvm_ud_intercept(struct cpu_user_regs *regs)
 {
     struct vcpu *cur = current;
@@ -3608,7 +3622,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
         cur->domain->arch.x86_vendor != boot_cpu_data.x86_vendor;
     struct hvm_emulate_ctxt ctxt;
 
-    hvm_emulate_init_once(&ctxt, regs);
+    hvm_emulate_init_once(&ctxt, opt_hvm_fep ? NULL : is_cross_vendor, regs);
 
     if ( opt_hvm_fep )
    {
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 256408b..8c02e82 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -78,7 +78,7 @@ void send_invalidate_req(void)
         gprintk(XENLOG_ERR, "Unsuccessful map-cache invalidate\n");
 }
 
-bool handle_mmio(void)
+bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate)
 {
     struct hvm_emulate_ctxt ctxt;
     struct vcpu *curr = current;
@@ -87,7 +87,7 @@ bool handle_mmio(void)
 
     ASSERT(!is_pvh_vcpu(curr));
 
-    hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
+    hvm_emulate_init_once(&ctxt, validate, guest_cpu_user_regs());
 
     rc = hvm_emulate_one(&ctxt);
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 673a215..2830f6c 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -168,7 +168,7 @@ bool_t handle_hvm_io_completion(struct vcpu *v)
     {
         struct hvm_emulate_ctxt ctxt;
 
-        hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
+        hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
         vmx_realmode_emulate_one(&ctxt);
         hvm_emulate_writeback(&ctxt);
diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c
index 9cdbe9e..e1a1581 100644
--- a/xen/arch/x86/hvm/svm/emulate.c
+++ b/xen/arch/x86/hvm/svm/emulate.c
@@ -107,7 +107,7 @@ int __get_instruction_length_from_list(struct vcpu *v,
 #endif
 
     ASSERT(v == current);
-    hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
+    hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
     hvm_emulate_init_per_insn(&ctxt, NULL, 0);
     state = x86_decode_insn(&ctxt.ctxt, hvmemul_insn_fetch);
     if ( IS_ERR_OR_NULL(state) )
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 86dcffc..ae8e2c4 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2156,6 +2156,16 @@ static void svm_invlpg_intercept(unsigned long vaddr)
     paging_invlpg(current, vaddr);
 }
 
+static bool is_invlpg(const struct x86_emulate_state *state,
+                      const struct x86_emulate_ctxt *ctxt)
+{
+    unsigned int ext;
+
+    return ctxt->opcode == X86EMUL_OPC(0x0f, 0x01) &&
+           x86_insn_modrm(state, NULL, &ext) != 3 &&
+           (ext & 7) == 7;
+}
+
 static void svm_invlpg(struct vcpu *v, unsigned long vaddr)
 {
     svm_asid_g_invlpg(v, vaddr);
@@ -2501,7 +2511,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
             if ( handle_pio(port, bytes, dir) )
                 __update_guest_eip(regs, vmcb->exitinfo2 - vmcb->rip);
         }
-        else if ( !handle_mmio() )
+        else if ( !hvm_emulate_one_insn(x86_insn_is_portio) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;
 
@@ -2509,7 +2519,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
         if ( cpu_has_svm_decode && (vmcb->exitinfo1 & (1ULL << 63)) )
             svm_vmexit_do_cr_access(vmcb, regs);
-        else if ( !handle_mmio() )
+        else if ( !hvm_emulate_one_insn(x86_insn_is_cr_access) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;
 
@@ -2519,7 +2529,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
             svm_invlpg_intercept(vmcb->exitinfo1);
             __update_guest_eip(regs, vmcb->nextrip - vmcb->rip);
         }
-        else if ( !handle_mmio() )
+        else if ( !hvm_emulate_one_insn(is_invlpg) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;
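[Editorial note: the is_invlpg() predicate above keys off the ModRM byte. x86_insn_modrm() reports the addressing mode (3 means a register operand) and hands back the reg/opcode-extension field, so "mode != 3 && ext == 7" selects exactly the memory-operand /7 encoding of opcode group 7 (0f 01), i.e. INVLPG rather than its register-form siblings. A self-contained sketch of that decoding rule, with hypothetical helper names independent of the Xen types:]

#include <stdbool.h>

/* ModRM layout: mod in bits 7:6, reg/opcode extension in 5:3, rm in 2:0. */
struct modrm {
    unsigned int mod, reg, rm;
};

static struct modrm modrm_decode(unsigned char b)
{
    return (struct modrm){ .mod = b >> 6, .reg = (b >> 3) & 7, .rm = b & 7 };
}

/*
 * Group 7 uses reg as an opcode extension; /7 with a memory operand
 * (mod != 3) encodes INVLPG, while the mod == 3 forms are different
 * instructions entirely.
 */
static bool modrm_is_invlpg(unsigned char b)
{
    const struct modrm m = modrm_decode(b);

    return m.mod != 3 && m.reg == 7;
}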
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 0bf70bd..7ef4e45 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -172,7 +172,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
     if ( intr_info & INTR_INFO_VALID_MASK )
         __vmwrite(VM_ENTRY_INTR_INFO, 0);
 
-    hvm_emulate_init_once(&hvmemul_ctxt, regs);
+    hvm_emulate_init_once(&hvmemul_ctxt, NULL, regs);
 
     /* Only deliver interrupts into emulated real mode. */
     if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 0fe179e..61925cf 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3717,7 +3717,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         {
             /* INS, OUTS */
             if ( unlikely(is_pvh_vcpu(v)) /* PVH fixme */ ||
-                 !handle_mmio() )
+                 !hvm_emulate_one_insn(x86_insn_is_portio) )
                 hvm_inject_hw_exception(TRAP_gp_fault, 0);
         }
         else
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 7cea64a..e3ec313 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -3678,7 +3678,7 @@ x86_emulate(
             emulate_fpu_insn_memsrc("flds", src.val);
             dst.type = OP_NONE;
             break;
-        case 2: /* fstp m32fp */
+        case 2: /* fst m32fp */
             emulate_fpu_insn_memdst("fsts", dst.val);
             dst.bytes = 4;
             break;
@@ -5928,6 +5928,186 @@ x86_insn_operand_ea(const struct x86_emulate_state *state,
     return state->ea.mem.off;
 }
 
+bool
+x86_insn_is_mem_access(const struct x86_emulate_state *state,
+                       const struct x86_emulate_ctxt *ctxt)
+{
+    if ( state->ea.type == OP_MEM )
+        return ctxt->opcode != 0x8d /* LEA */ &&
+               (ctxt->opcode != X86EMUL_OPC(0x0f, 0x01) ||
+                (state->modrm_reg & 7) != 7) /* INVLPG */;
+
+    switch ( ctxt->opcode )
+    {
+    case 0x6c ... 0x6f: /* INS / OUTS */
+    case 0xa4 ... 0xa7: /* MOVS / CMPS */
+    case 0xaa ... 0xaf: /* STOS / LODS / SCAS */
+    case 0xd7:          /* XLAT */
+        return true;
+
+    case X86EMUL_OPC(0x0f, 0x01):
+        /* Cover CLZERO. */
+        return (state->modrm_rm & 7) == 4 && (state->modrm_reg & 7) == 7;
+    }
+
+    return false;
+}
+
+bool
+x86_insn_is_mem_write(const struct x86_emulate_state *state,
+                      const struct x86_emulate_ctxt *ctxt)
+{
+    switch ( state->desc & DstMask )
+    {
+    case DstMem:
+        return state->modrm_mod != 3;
+
+    case DstBitBase:
+    case DstImplicit:
+        break;
+
+    default:
+        return false;
+    }
+
+    if ( state->modrm_mod == 3 )
+        /* CLZERO is the odd one. */
+        return ctxt->opcode == X86EMUL_OPC(0x0f, 0x01) &&
+               (state->modrm_rm & 7) == 4 && (state->modrm_reg & 7) == 7;
+
+    switch ( ctxt->opcode )
+    {
+    case 0x6c: case 0x6d:                /* INS */
+    case 0xa4: case 0xa5:                /* MOVS */
+    case 0xaa: case 0xab:                /* STOS */
+    case X86EMUL_OPC(0x0f, 0x11):        /* MOVUPS */
+    case X86EMUL_OPC_VEX(0x0f, 0x11):    /* VMOVUPS */
+    case X86EMUL_OPC_66(0x0f, 0x11):     /* MOVUPD */
+    case X86EMUL_OPC_VEX_66(0x0f, 0x11): /* VMOVUPD */
+    case X86EMUL_OPC_F3(0x0f, 0x11):     /* MOVSS */
+    case X86EMUL_OPC_VEX_F3(0x0f, 0x11): /* VMOVSS */
+    case X86EMUL_OPC_F2(0x0f, 0x11):     /* MOVSD */
+    case X86EMUL_OPC_VEX_F2(0x0f, 0x11): /* VMOVSD */
+    case X86EMUL_OPC(0x0f, 0x29):        /* MOVAPS */
+    case X86EMUL_OPC_VEX(0x0f, 0x29):    /* VMOVAPS */
+    case X86EMUL_OPC_66(0x0f, 0x29):     /* MOVAPD */
+    case X86EMUL_OPC_VEX_66(0x0f, 0x29): /* VMOVAPD */
+    case X86EMUL_OPC(0x0f, 0x2b):        /* MOVNTPS */
+    case X86EMUL_OPC_VEX(0x0f, 0x2b):    /* VMOVNTPS */
+    case X86EMUL_OPC_66(0x0f, 0x2b):     /* MOVNTPD */
+    case X86EMUL_OPC_VEX_66(0x0f, 0x2b): /* VMOVNTPD */
+    case X86EMUL_OPC(0x0f, 0x7e):        /* MOVD/MOVQ */
+    case X86EMUL_OPC_66(0x0f, 0x7e):     /* MOVD/MOVQ */
+    case X86EMUL_OPC_VEX_66(0x0f, 0x7e): /* VMOVD/VMOVQ */
+    case X86EMUL_OPC(0x0f, 0x7f):        /* MOVQ */
+    case X86EMUL_OPC_66(0x0f, 0x7f):     /* MOVDQA */
+    case X86EMUL_OPC_VEX_66(0x0f, 0x7f): /* VMOVDQA */
+    case X86EMUL_OPC_F3(0x0f, 0x7f):     /* MOVDQU */
+    case X86EMUL_OPC_VEX_F3(0x0f, 0x7f): /* VMOVDQU */
+    case X86EMUL_OPC(0x0f, 0xab):        /* BTS */
+    case X86EMUL_OPC(0x0f, 0xb3):        /* BTR */
+    case X86EMUL_OPC(0x0f, 0xbb):        /* BTC */
+    case X86EMUL_OPC_66(0x0f, 0xd6):     /* MOVQ */
+    case X86EMUL_OPC_VEX_66(0x0f, 0xd6): /* VMOVQ */
+    case X86EMUL_OPC(0x0f, 0xe7):        /* MOVNTQ */
+    case X86EMUL_OPC_66(0x0f, 0xe7):     /* MOVNTDQ */
+    case X86EMUL_OPC_VEX_66(0x0f, 0xe7): /* VMOVNTDQ */
+        return true;
+
+    case 0xd9:
+        switch ( state->modrm_reg & 7 )
+        {
+        case 2: /* FST m32fp */
+        case 3: /* FSTP m32fp */
+        case 6: /* FNSTENV */
+        case 7: /* FNSTCW */
+            return true;
+        }
+        break;
+
+    case 0xdb:
+        switch ( state->modrm_reg & 7 )
+        {
+        case 1: /* FISTTP m32i */
+        case 2: /* FIST m32i */
+        case 3: /* FISTP m32i */
+        case 7: /* FSTP m80fp */
+            return true;
+        }
+        break;
+
+    case 0xdd:
+        switch ( state->modrm_reg & 7 )
+        {
+        case 1: /* FISTTP m64i */
+        case 2: /* FST m64fp */
+        case 3: /* FSTP m64fp */
+        case 6: /* FNSAVE */
+        case 7: /* FNSTSW */
+            return true;
+        }
+        break;
+
+    case 0xdf:
+        switch ( state->modrm_reg & 7 )
+        {
+        case 1: /* FISTTP m16i */
+        case 2: /* FIST m16i */
+        case 3: /* FISTP m16i */
+        case 6: /* FBSTP */
+        case 7: /* FISTP m64i */
+            return true;
+        }
+        break;
+
+    case X86EMUL_OPC(0x0f, 0x01):
+        return !(state->modrm_reg & 6); /* SGDT / SIDT */
+
+    case X86EMUL_OPC(0x0f, 0xba):
+        return (state->modrm_reg & 7) > 4; /* BTS / BTR / BTC */
+    }
+
+    return false;
+}
+
+bool
+x86_insn_is_portio(const struct x86_emulate_state *state,
+                   const struct x86_emulate_ctxt *ctxt)
+{
+    switch ( ctxt->opcode )
+    {
+    case 0x6c ... 0x6f: /* INS / OUTS */
+    case 0xe4 ... 0xe7: /* IN / OUT imm8 */
+    case 0xec ... 0xef: /* IN / OUT %dx */
+        return true;
+    }
+
+    return false;
+}
+
+bool
+x86_insn_is_cr_access(const struct x86_emulate_state *state,
+                      const struct x86_emulate_ctxt *ctxt)
+{
+    switch ( ctxt->opcode )
+    {
+        unsigned int ext;
+
+    case X86EMUL_OPC(0x0f, 0x01):
+        if ( x86_insn_modrm(state, NULL, &ext) >= 0
+             && (ext & 5) == 4 ) /* SMSW / LMSW */
+            return true;
+        break;
+
+    case X86EMUL_OPC(0x0f, 0x06): /* CLTS */
+    case X86EMUL_OPC(0x0f, 0x20): /* MOV from CRn */
+    case X86EMUL_OPC(0x0f, 0x22): /* MOV to CRn */
+        return true;
+    }
+
+    return false;
+}
+
 unsigned long
 x86_insn_immediate(const struct x86_emulate_state *state, unsigned int nr)
 {
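[Editorial note: all four exported predicates share the hvm_emulate_validate_t-compatible signature, so a call site that must accept more than one instruction class can compose them. A hypothetical combined filter, not part of this patch and presuming the declarations added to x86_emulate.h below, could read:]

/*
 * Hypothetical composition: permit either port I/O or CR accesses at a
 * single call site, using only the predicates introduced above.
 */
static bool is_portio_or_cr_access(const struct x86_emulate_state *state,
                                   const struct x86_emulate_ctxt *ctxt)
{
    return x86_insn_is_portio(state, ctxt) ||
           x86_insn_is_cr_access(state, ctxt);
}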
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 57ebfde..9e2d4f3 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -639,6 +639,18 @@ x86_insn_immediate(const struct x86_emulate_state *state,
 unsigned int
 x86_insn_length(const struct x86_emulate_state *state,
                 const struct x86_emulate_ctxt *ctxt);
+bool
+x86_insn_is_mem_access(const struct x86_emulate_state *state,
+                       const struct x86_emulate_ctxt *ctxt);
+bool
+x86_insn_is_mem_write(const struct x86_emulate_state *state,
+                      const struct x86_emulate_ctxt *ctxt);
+bool
+x86_insn_is_portio(const struct x86_emulate_state *state,
+                   const struct x86_emulate_ctxt *ctxt);
+bool
+x86_insn_is_cr_access(const struct x86_emulate_state *state,
+                      const struct x86_emulate_ctxt *ctxt);
 
 #ifdef NDEBUG
 static inline void x86_emulate_free_state(struct x86_emulate_state *state) {}
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 83266bb..dd9facb 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -17,9 +17,18 @@
 #include <asm/hvm/hvm.h>
 #include <asm/x86_emulate.h>
 
+typedef bool hvm_emulate_validate_t(const struct x86_emulate_state *state,
+                                    const struct x86_emulate_ctxt *ctxt);
+
 struct hvm_emulate_ctxt {
     struct x86_emulate_ctxt ctxt;
 
+    /*
+     * validate: Post-decode, pre-emulate hook to allow caller controlled
+     * filtering.
+     */
+    hvm_emulate_validate_t *validate;
+
     /* Cache of 16 bytes of instruction. */
     uint8_t insn_buf[16];
     unsigned long insn_buf_eip;
@@ -41,6 +50,8 @@ enum emul_kind {
     EMUL_KIND_SET_CONTEXT_INSN
 };
 
+bool __nonnull(1) hvm_emulate_one_insn(
+    hvm_emulate_validate_t *validate);
 int hvm_emulate_one(
     struct hvm_emulate_ctxt *hvmemul_ctxt);
 void hvm_emulate_one_vm_event(enum emul_kind kind,
@@ -49,6 +60,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind,
 /* Must be called once to set up hvmemul state. */
 void hvm_emulate_init_once(
     struct hvm_emulate_ctxt *hvmemul_ctxt,
+    hvm_emulate_validate_t *validate,
     struct cpu_user_regs *regs);
 /* Must be called once before each instruction emulated. */
 void hvm_emulate_init_per_insn(
@@ -64,6 +76,11 @@ struct segment_register *hvmemul_get_seg_reg(
     struct hvm_emulate_ctxt *hvmemul_ctxt);
 int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla);
 
+static inline bool handle_mmio(void)
+{
+    return hvm_emulate_one_insn(x86_insn_is_mem_access);
+}
+
 int hvmemul_insn_fetch(enum x86_segment seg,
                        unsigned long offset,
                        void *p_data,
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 920a218..41ade5c 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -118,7 +118,6 @@ void relocate_portio_handler(
 
 void send_timeoffset_req(unsigned long timeoff);
 void send_invalidate_req(void);
-bool handle_mmio(void);
 bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
                                   struct npfec);
 bool handle_pio(uint16_t port, unsigned int size, int dir);
--
generated by git-patchbot for /home/xen/git/xen.git#master