[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v9 06/13] xen: Add ring 3 vmware_port support
Summary is that VMware treats "in (%dx),%eax" (or "out %eax,(%dx)") to port 0x5658 specially. Note: since many operations return data in EAX, "in (%dx),%eax" is the one to use. The other lengths like "in (%dx),%al" will still do things, only the AL part of EAX will be changed. For "out %eax,(%dx)" of all lengths, EAX will remain unchanged. This instruction is allowed to be used from ring 3. To support this the vmexit for GP needs to be enabled. I have not fully tested that nested HVM is doing the right thing for this. The support included is enough to allow VMware tools to install in an HVM domU. Enable no-fault of pio in x86_emulate for VMware port. Also adjust the emulation registers after doing a VMware backdoor operation. Add new routine hvm_emulate_one_gp() to be used by the #GP fault handler. Some of the best info is at: https://sites.google.com/site/chitchatvmback/backdoor Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx> --- v9: Split #GP handling (or skipping of #GP) code out of previous patch to help with the review process. Switch to x86_emulator to handle #GP. I think the hvm_emulate_ops_gp() covers all needed ops. Not able to validate all paths through _hvm_emulate_one(). 
xen/arch/x86/hvm/emulate.c | 62 ++++++++++++++++++++++++++++++++-- xen/arch/x86/hvm/svm/svm.c | 27 +++++++++++++++ xen/arch/x86/hvm/svm/vmcb.c | 2 ++ xen/arch/x86/hvm/vmware/vmport.c | 11 ++++++ xen/arch/x86/hvm/vmx/vmcs.c | 2 ++ xen/arch/x86/hvm/vmx/vmx.c | 38 +++++++++++++++++++++ xen/arch/x86/x86_emulate/x86_emulate.c | 25 +++++++++++--- xen/arch/x86/x86_emulate/x86_emulate.h | 8 +++++ xen/include/asm-x86/hvm/emulate.h | 2 ++ xen/include/asm-x86/hvm/vmport.h | 1 + 10 files changed, 172 insertions(+), 6 deletions(-) diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index 636c909..a6a6a5c 100644 --- a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -22,6 +22,7 @@ #include <asm/hvm/trace.h> #include <asm/hvm/support.h> #include <asm/hvm/svm/svm.h> +#include <asm/hvm/vmport.h> static void hvmtrace_io_assist(int is_mmio, ioreq_t *p) { @@ -776,6 +777,7 @@ static int hvmemul_read_io_discard( unsigned long *val, struct x86_emulate_ctxt *ctxt) { + ctxt->do_vmport = 0; return X86EMUL_OKAY; } @@ -785,6 +787,7 @@ static int hvmemul_write_io_discard( unsigned long val, struct x86_emulate_ctxt *ctxt) { + ctxt->do_vmport = 0; return X86EMUL_OKAY; } @@ -802,6 +805,27 @@ static int hvmemul_wbinvd_discard( return X86EMUL_OKAY; } +static int hvmemul_write_gp( + unsigned int seg, + unsigned long offset, + void *p_data, + unsigned int bytes, + struct x86_emulate_ctxt *ctxt) +{ + return X86EMUL_EXCEPTION; +} + +static int hvmemul_cmpxchg_gp( + unsigned int seg, + unsigned long offset, + void *old, + void *new, + unsigned int bytes, + struct x86_emulate_ctxt *ctxt) +{ + return X86EMUL_EXCEPTION; +} + static int hvmemul_cmpxchg( enum x86_segment seg, unsigned long offset, @@ -1355,6 +1379,17 @@ static int hvmemul_invlpg( return rc; } +static int hvmemul_vmport_check( + unsigned int first_port, + struct x86_emulate_ctxt *ctxt) +{ + int rc = vmport_check_port(first_port); + + if (rc) + ctxt->do_vmport = 1; + return rc; +} + static const struct 
x86_emulate_ops hvm_emulate_ops = { .read = hvmemul_read, .insn_fetch = hvmemul_insn_fetch, @@ -1378,7 +1413,8 @@ static const struct x86_emulate_ops hvm_emulate_ops = { .inject_sw_interrupt = hvmemul_inject_sw_interrupt, .get_fpu = hvmemul_get_fpu, .put_fpu = hvmemul_put_fpu, - .invlpg = hvmemul_invlpg + .invlpg = hvmemul_invlpg, + .vmport_check = hvmemul_vmport_check, }; static const struct x86_emulate_ops hvm_emulate_ops_no_write = { @@ -1404,7 +1440,22 @@ static const struct x86_emulate_ops hvm_emulate_ops_no_write = { .inject_sw_interrupt = hvmemul_inject_sw_interrupt, .get_fpu = hvmemul_get_fpu, .put_fpu = hvmemul_put_fpu, - .invlpg = hvmemul_invlpg + .invlpg = hvmemul_invlpg, + .vmport_check = hvmemul_vmport_check, +}; + +static const struct x86_emulate_ops hvm_emulate_ops_gp = { + .read = hvmemul_read, + .insn_fetch = hvmemul_insn_fetch, + .write = hvmemul_write_gp, + .cmpxchg = hvmemul_cmpxchg_gp, + .read_segment = hvmemul_read_segment, + .write_segment = hvmemul_write_segment, + .read_io = hvmemul_read_io, + .write_io = hvmemul_write_io, + .inject_hw_exception = hvmemul_inject_hw_exception, + .inject_sw_interrupt = hvmemul_inject_sw_interrupt, + .vmport_check = hvmemul_vmport_check, }; static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt, @@ -1521,6 +1572,12 @@ int hvm_emulate_one( return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops); } +int hvm_emulate_one_gp( + struct hvm_emulate_ctxt *hvmemul_ctxt) +{ + return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops_gp); +} + int hvm_emulate_one_no_write( struct hvm_emulate_ctxt *hvmemul_ctxt) { @@ -1570,6 +1627,7 @@ void hvm_emulate_prepare( hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(current); hvmemul_ctxt->ctxt.regs = regs; hvmemul_ctxt->ctxt.force_writeback = 1; + hvmemul_ctxt->ctxt.do_vmport = 0; hvmemul_ctxt->seg_reg_accessed = 0; hvmemul_ctxt->seg_reg_dirty = 0; hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt); diff --git a/xen/arch/x86/hvm/svm/svm.c 
b/xen/arch/x86/hvm/svm/svm.c index 4b7b818..5cb70ae 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -59,6 +59,7 @@ #include <public/sched.h> #include <asm/hvm/vpt.h> #include <asm/hvm/trace.h> +#include <asm/hvm/vmport.h> #include <asm/hap.h> #include <asm/apic.h> #include <asm/debugger.h> @@ -2120,6 +2121,28 @@ svm_vmexit_do_vmsave(struct vmcb_struct *vmcb, return; } +static void svm_vmexit_gp_intercept(struct cpu_user_regs *regs, + struct vcpu *v) +{ + struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; + int rc; + + if ( vmcb->exitinfo1 != 0 || vmcb->exitinfo2 != 0 ) + rc = X86EMUL_EXCEPTION; + else + { + struct hvm_emulate_ctxt ctxt; + + hvm_emulate_prepare(&ctxt, regs); + rc = hvm_emulate_one_gp(&ctxt); + + if ( rc == X86EMUL_OKAY ) + hvm_emulate_writeback(&ctxt); + } + if ( rc != X86EMUL_OKAY && rc != X86EMUL_RETRY ) + hvm_inject_hw_exception(TRAP_gp_fault, vmcb->exitinfo1); +} + static void svm_vmexit_ud_intercept(struct cpu_user_regs *regs) { struct hvm_emulate_ctxt ctxt; @@ -2484,6 +2507,10 @@ void svm_vmexit_handler(struct cpu_user_regs *regs) break; } + case VMEXIT_EXCEPTION_GP: + svm_vmexit_gp_intercept(regs, v); + break; + case VMEXIT_EXCEPTION_UD: svm_vmexit_ud_intercept(regs); break; diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c index 21292bb..45ead61 100644 --- a/xen/arch/x86/hvm/svm/vmcb.c +++ b/xen/arch/x86/hvm/svm/vmcb.c @@ -195,6 +195,8 @@ static int construct_vmcb(struct vcpu *v) HVM_TRAP_MASK | (1U << TRAP_no_device); + if ( v->domain->arch.hvm_domain.is_vmware_port_enabled ) + vmcb->_exception_intercepts |= 1U << TRAP_gp_fault; if ( paging_mode_hap(v->domain) ) { vmcb->_np_enable = 1; /* enable nested paging */ diff --git a/xen/arch/x86/hvm/vmware/vmport.c b/xen/arch/x86/hvm/vmware/vmport.c index 3d9f30e..2e61682 100644 --- a/xen/arch/x86/hvm/vmware/vmport.c +++ b/xen/arch/x86/hvm/vmware/vmport.c @@ -25,6 +25,17 @@ void vmport_register(struct domain *d) register_portio_handler(d, BDOOR_PORT, 
4, vmport_ioport); } +int vmport_check_port(unsigned int port) +{ + struct vcpu *curr = current; + struct domain *d = curr->domain; + + if ( is_hvm_domain(d) && d->arch.hvm_domain.is_vmware_port_enabled && + port == BDOOR_PORT ) + return 1; + return 0; +} + int vmport_ioport(int dir, uint32_t port, uint32_t bytes, uint32_t *val) { struct cpu_user_regs *regs = guest_cpu_user_regs(); diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index d614638..4cc2c9e 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -1102,6 +1102,8 @@ static int construct_vmcs(struct vcpu *v) v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault)) + | (v->domain->arch.hvm_domain.is_vmware_port_enabled ? + (1U << TRAP_gp_fault) : 0) | (1U << TRAP_no_device); vmx_update_exception_bitmap(v); diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index e1c55ce..34026eb 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -44,6 +44,7 @@ #include <asm/hvm/support.h> #include <asm/hvm/vmx/vmx.h> #include <asm/hvm/vmx/vmcs.h> +#include <asm/hvm/vmport.h> #include <public/sched.h> #include <public/hvm/ioreq.h> #include <asm/hvm/vpic.h> @@ -1281,6 +1282,8 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr) v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK | (paging_mode_hap(v->domain) ? 0 : (1U << TRAP_page_fault)) + | (v->domain->arch.hvm_domain.is_vmware_port_enabled ? 
+ (1U << TRAP_gp_fault) : 0) | (1U << TRAP_no_device); vmx_update_exception_bitmap(v); vmx_update_debug_state(v); @@ -2632,6 +2635,38 @@ static void vmx_idtv_reinject(unsigned long idtv_info) } } +static void vmx_vmexit_gp_intercept(struct cpu_user_regs *regs, + struct vcpu *v) +{ + unsigned long exit_qualification; + unsigned long ecode; + int rc; + unsigned long vector; + + __vmread(VM_EXIT_INTR_INFO, &vector); + ASSERT(vector & INTR_INFO_VALID_MASK); + ASSERT(vector & INTR_INFO_DELIVER_CODE_MASK); + + __vmread(EXIT_QUALIFICATION, &exit_qualification); + __vmread(VM_EXIT_INTR_ERROR_CODE, &ecode); + + if ( ecode != 0 || exit_qualification != 0 ) + rc = X86EMUL_EXCEPTION; + else + { + struct hvm_emulate_ctxt ctxt; + + hvm_emulate_prepare(&ctxt, regs); + rc = hvm_emulate_one_gp(&ctxt); + + if ( rc == X86EMUL_OKAY ) + hvm_emulate_writeback(&ctxt); + } + + if ( rc != X86EMUL_OKAY && rc != X86EMUL_RETRY ) + hvm_inject_hw_exception(TRAP_gp_fault, ecode); +} + static int vmx_handle_apic_write(void) { unsigned long exit_qualification; @@ -2857,6 +2892,9 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) HVMTRACE_1D(TRAP, vector); vmx_fpu_dirty_intercept(); break; + case TRAP_gp_fault: + vmx_vmexit_gp_intercept(regs, v); + break; case TRAP_page_fault: __vmread(EXIT_QUALIFICATION, &exit_qualification); __vmread(VM_EXIT_INTR_ERROR_CODE, &ecode); diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c index 42e2588..074b597 100644 --- a/xen/arch/x86/x86_emulate/x86_emulate.c +++ b/xen/arch/x86/x86_emulate/x86_emulate.c @@ -971,12 +971,20 @@ static int ioport_access_check( unsigned int first_port, unsigned int bytes, struct x86_emulate_ctxt *ctxt, - const struct x86_emulate_ops *ops) + const struct x86_emulate_ops *ops, + bool_t pio) { unsigned long iobmp; struct segment_register tr; int rc = X86EMUL_OKAY; + if ( pio ) + { + fail_if(ops->vmport_check == NULL); + if ( ops->vmport_check(first_port, ctxt) ) + return X86EMUL_OKAY; + } + if 
( !(ctxt->regs->eflags & EFLG_VM) && mode_iopl() ) return X86EMUL_OKAY; @@ -2250,7 +2258,7 @@ x86_emulate( dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes; dst.mem.seg = x86_seg_es; dst.mem.off = truncate_ea_and_reps(_regs.edi, nr_reps, dst.bytes); - if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 ) + if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops, 0)) != 0 ) goto done; if ( (nr_reps > 1) && (ops->rep_ins != NULL) && ((rc = ops->rep_ins(port, dst.mem.seg, dst.mem.off, dst.bytes, @@ -2279,7 +2287,7 @@ x86_emulate( unsigned int port = (uint16_t)_regs.edx; dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes; ea.mem.off = truncate_ea_and_reps(_regs.esi, nr_reps, dst.bytes); - if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 ) + if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops, 0)) != 0 ) goto done; if ( (nr_reps > 1) && (ops->rep_outs != NULL) && ((rc = ops->rep_outs(ea.mem.seg, ea.mem.off, port, dst.bytes, @@ -3393,7 +3401,7 @@ x86_emulate( ? insn_fetch_type(uint8_t) : (uint16_t)_regs.edx); op_bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 
4 : op_bytes; - if ( (rc = ioport_access_check(port, op_bytes, ctxt, ops)) != 0 ) + if ( (rc = ioport_access_check(port, op_bytes, ctxt, ops, 1)) != 0 ) goto done; if ( b & 2 ) { @@ -3412,6 +3420,15 @@ x86_emulate( } if ( rc != 0 ) goto done; + if ( ctxt->do_vmport ) + { + ctxt->do_vmport = 0; + _regs._ebx = ctxt->regs->_ebx; + _regs._ecx = ctxt->regs->_ecx; + _regs._edx = ctxt->regs->_edx; + _regs._esi = ctxt->regs->_esi; + _regs._edi = ctxt->regs->_edi; + } break; } diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h index 593b31e..e8e4413 100644 --- a/xen/arch/x86/x86_emulate/x86_emulate.h +++ b/xen/arch/x86/x86_emulate/x86_emulate.h @@ -393,6 +393,11 @@ struct x86_emulate_ops enum x86_segment seg, unsigned long offset, struct x86_emulate_ctxt *ctxt); + + /* vmport_check */ + int (*vmport_check)( + unsigned int port, + struct x86_emulate_ctxt *ctxt); }; struct cpu_user_regs; @@ -423,6 +428,9 @@ struct x86_emulate_ctxt } flags; uint8_t byte; } retire; + + /* vmport support */ + bool_t do_vmport; }; /* diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h index 5411302..a5e589b 100644 --- a/xen/include/asm-x86/hvm/emulate.h +++ b/xen/include/asm-x86/hvm/emulate.h @@ -36,6 +36,8 @@ struct hvm_emulate_ctxt { int hvm_emulate_one( struct hvm_emulate_ctxt *hvmemul_ctxt); +int hvm_emulate_one_gp( + struct hvm_emulate_ctxt *hvmemul_ctxt); int hvm_emulate_one_no_write( struct hvm_emulate_ctxt *hvmemul_ctxt); void hvm_mem_event_emulate_one(bool_t nowrite, diff --git a/xen/include/asm-x86/hvm/vmport.h b/xen/include/asm-x86/hvm/vmport.h index eb3e472..e11ca1a 100644 --- a/xen/include/asm-x86/hvm/vmport.h +++ b/xen/include/asm-x86/hvm/vmport.h @@ -18,6 +18,7 @@ #define ASM_X86_HVM_VMPORT_H__ void vmport_register(struct domain *d); +int vmport_check_port(unsigned int port); int vmport_ioport(int dir, uint32_t port, uint32_t bytes, uint32_t *val); #endif /* ASM_X86_HVM_VMPORT_H__ */ -- 1.8.4 
_______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |