[Xen-devel] [PATCH][RFC] Emulating real mode with x86_emulate
Howdy,

Attached is a patch that begins to lay down the infrastructure for emulating real mode with x86_emulate(). With a little more refactoring, I think it could also replace the SVM emulator.

The patch introduces an HVMOP hypercall that sets a flag in the hvm vcpu struct to signal that real mode should be emulated with x86_emulate instead of using vm86. This is to make development a little bit easier, since x86_emulate is not quite there yet wrt 16 bit emulation. It can be enabled by passing -emulate-16bit to qemu-dm (I use a wrapper script similar to qemu-dm.debug).

The VT code keeps track of whether it's in the emulator and loops in x86_emulate on the do_resume path. I think this code should probably be refactored into the common HVM code, although that would require changing some of the HVM ops. It would also allow SVM to use x86_emulate to handle individual instructions.

There are some issues to work out. Namely, x86_emulate appears to want blocking PIO calls, which isn't conducive to the way PIO works today in HVM. This is only a problem for IN instructions at the moment: an OUT can be posted to qemu-dm without waiting for completion (as the write_io handler in the patch does), but an IN needs the result before the instruction can finish. One possible non-blocking shape is sketched below.

I'm also a bit confused about how to properly loop in the emulator. schedule_tail is not meant to return, so perhaps we should loop on emulating == 1 instead of hypercall_preempt_check()? I didn't think the hypervisor was preemptible, though. A sketch of that alternative loop follows as well.

The current code doesn't handle non-flat segments, as I don't think hvm_copy_from/to_guest handle them (which I assume they would need to). However, it is enough to start running instructions in x86_emulate, so it's enough to start working on enhancing that.
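For the IN case, one possible shape for a non-blocking read_io hook is a two-pass scheme: the hook posts the ioreq and asks x86_emulate to back out, and the whole instruction is re-executed once qemu-dm has completed the request. This is purely a sketch against this patch; the X86EMUL_RETRY code and the io_state/io_data fields with their HVMIO_* states do not exist anywhere today and would have to be added:

    /* Hypothetical result code asking x86_emulate to abandon the current
     * instruction without committing any state, so it can be retried. */
    #define X86EMUL_RETRY 2

    static int vmx_em_read_io(unsigned int port,
                              unsigned int bytes,
                              unsigned long *val,
                              struct x86_emulate_ctxt *ctxt)
    {
        struct vcpu *v = current;

        /* Second pass: the ioreq we posted earlier has completed and
         * the do_resume path stashed the result; consume it. */
        if ( v->arch.hvm_vcpu.io_state == HVMIO_completed )
        {
            *val = v->arch.hvm_vcpu.io_data;
            v->arch.hvm_vcpu.io_state = HVMIO_none;
            return 0;
        }

        /* First pass: post the request to qemu-dm and unwind. */
        send_pio_req(port, 1, bytes, 0, IOREQ_READ, 0, 0);
        v->arch.hvm_vcpu.io_state = HVMIO_awaiting_completion;
        return X86EMUL_RETRY;
    }

This only works if x86_emulate treats the new return code as "no side effects happened", i.e. it must not advance eip or write back registers before the retry.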
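And for the looping question, here is a sketch of what vmx_do_emulate() could look like if it keyed off the emulating flag instead of hypercall_preempt_check(). The do_softirq() call is my guess at how to keep the vcpu schedulable between instructions; I haven't verified that it is safe in this context:

    static void vmx_do_emulate(struct vcpu *v)
    {
        struct x86_emulate_ctxt ctxt;
        struct cpu_user_regs *regs = &v->arch.guest_context.user_regs;

        hvm_store_cpu_guest_regs(v, regs, NULL);
        ctxt.regs = regs;
        ctxt.addr_size = 16;
        ctxt.sp_size = 16;

        /* Stay in the emulator until vmx_set_cr0() sees CR0.PE being
         * set and clears the flag, then fall back into the normal VMX
         * entry path instead of crashing the domain. */
        while ( v->arch.hvm_vmx.emulating )
        {
            if ( x86_emulate(&ctxt, &em_ops) )
            {
                unsigned long eip = __vmread(GUEST_RIP);
                printk("failed to emulate instruction at %%eip = 0x%lx\n", eip);
                domain_crash_synchronous();
            }

            /* Don't let a vcpu spinning in real mode hog the pcpu. */
            if ( hypercall_preempt_check() )
                do_softirq();
        }

        hvm_load_cpu_guest_regs(v, regs);
        reset_stack_and_jump(vmx_asm_do_vmentry);
    }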
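Finally, for non-flat segments: in real mode the segment base is just the selector shifted left by four, so the read/write/fetch hooks could resolve seg:offset to a linear address before copying. A minimal sketch, assuming a helper that returns the cached segment base from the VMCS (vmx_get_segment_base() is hypothetical; the mapping from enum x86_segment to GUEST_CS_BASE and friends would need writing):

    /* Resolve a real-mode seg:offset pair to a linear address.  Reading
     * the base from the VMCS rather than computing selector << 4 would
     * also keep "big real mode" guests working, where the cached base
     * need not equal selector << 4. */
    static unsigned long realmode_linear(enum x86_segment seg,
                                         unsigned long offset)
    {
        unsigned long base = vmx_get_segment_base(current, seg); /* hypothetical */
        return base + (offset & 0xffff);
    }

    static int vmx_em_read(enum x86_segment seg,
                           unsigned long offset,
                           unsigned long *val,
                           unsigned int bytes,
                           struct x86_emulate_ctxt *ctxt)
    {
        if ( hvm_copy_from_guest_virt(val, realmode_linear(seg, offset), bytes) )
            return -1;
        return 0;
    }

With paging disabled, linear and physical addresses coincide, so handing the computed linear address to hvm_copy_from_guest_virt should do the right thing; wrap-around of offset + bytes past 0xffff is ignored in this sketch.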
%x\n", inl(0x595)); + if (inl(0x595) == 0xdeadbeef) { + struct xen_hvm_op_emulate_realmode op; + + printf("foo\n"); + op.domid = DOMID_SELF; + hypercall_hvm_op(HVMOP_emulate_realmode, &op); + } else { + memcpy((void *)VMXASSIST_PHYSICAL_ADDRESS, + vmxassist, sizeof(vmxassist)); + + printf("VMX go ...\n"); + __asm__ __volatile__( + "jmp *%%eax" + : : "a" (VMXASSIST_PHYSICAL_ADDRESS), "d" (0) + ); + } } printf("Invoking ROMBIOS ...\n"); diff -r 3fd9b0c71b8c tools/ioemu/hw/pc.c --- a/tools/ioemu/hw/pc.c Tue Mar 20 17:36:18 2007 +0000 +++ b/tools/ioemu/hw/pc.c Fri Mar 23 11:32:33 2007 -0500 @@ -316,6 +316,13 @@ static uint32_t ioport92_read(void *opaq return ioport_get_a20() << 1; } +static uint32_t emulate_16bit_read(void *opaque, uint32_t addr) +{ + if (emulate_16bit) + return 0xdeadbeef; + return 0; +} + /***********************************************************/ /* Bochs BIOS debug ports */ @@ -728,6 +735,8 @@ static void pc_init1(uint64_t ram_size, bochs_bios_init(); + register_ioport_read(0x595, 1, 4, emulate_16bit_read, NULL); + #ifndef CONFIG_DM if (linux_boot) { uint8_t bootsect[512]; diff -r 3fd9b0c71b8c tools/ioemu/vl.c --- a/tools/ioemu/vl.c Tue Mar 20 17:36:18 2007 +0000 +++ b/tools/ioemu/vl.c Fri Mar 23 11:32:33 2007 -0500 @@ -161,6 +161,7 @@ int vnc_display = -1; #endif int acpi_enabled = 0; int fd_bootchk = 1; +int emulate_16bit; extern int vcpus; @@ -5535,6 +5536,7 @@ enum { QEMU_OPTION_vncviewer, QEMU_OPTION_vncunused, QEMU_OPTION_vnclisten, + QEMU_OPTION_emulate_16bit, }; typedef struct QEMUOption { @@ -5614,6 +5616,7 @@ const QEMUOption qemu_options[] = { { "vncviewer", 0, QEMU_OPTION_vncviewer }, { "vncunused", 0, QEMU_OPTION_vncunused }, { "vnclisten", HAS_ARG, QEMU_OPTION_vnclisten }, + { "emulate-16bit", 0, QEMU_OPTION_emulate_16bit }, /* temporary options */ { "usb", 0, QEMU_OPTION_usb }, @@ -6522,6 +6525,9 @@ int main(int argc, char **argv) case QEMU_OPTION_vnclisten: parse_host(&vnclisten_addr, optarg); break; + case QEMU_OPTION_emulate_16bit: + emulate_16bit = 1; + break; } } } diff -r 3fd9b0c71b8c tools/ioemu/vl.h --- a/tools/ioemu/vl.h Tue Mar 20 17:36:18 2007 +0000 +++ b/tools/ioemu/vl.h Fri Mar 23 11:32:33 2007 -0500 @@ -156,6 +156,7 @@ extern void *shared_vram; extern FILE *logfile; +int emulate_16bit; #if defined(__i386__) || defined(__x86_64__) diff -r 3fd9b0c71b8c xen/arch/x86/hvm/hvm.c --- a/xen/arch/x86/hvm/hvm.c Tue Mar 20 17:36:18 2007 +0000 +++ b/xen/arch/x86/hvm/hvm.c Fri Mar 23 12:16:25 2007 -0500 @@ -185,6 +185,41 @@ static int hvmop_drain_io( out: rcu_unlock_domain(d); + return rc; +} + +static int hvmop_emulate_realmode( + XEN_GUEST_HANDLE(xen_hvm_emulate_realmode_t) uop) +{ + struct xen_hvm_emulate_realmode op; + struct domain *d; + struct vcpu *v; + int rc; + + printk("hvmop_emulate_realmode\n"); + + if ( copy_from_guest(&op, uop, 1) ) + return -EFAULT; + + if ( op.domid != DOMID_SELF ) + return -EPERM; + + d = rcu_lock_current_domain(); + if ( d == NULL ) + return -ESRCH; + + printk("guest requests real mode emulation\n"); + + for_each_vcpu(d, v) + { + v->arch.hvm_vcpu.emulate_realmode = 1; + } + + rc = 0; + + rcu_unlock_domain(d); + printk("foo %d\n", __LINE__); + return rc; } @@ -963,6 +998,10 @@ long do_hvm_op(unsigned long op, XEN_GUE guest_handle_cast(arg, xen_hvm_drain_io_t)); break; + case HVMOP_emulate_realmode: + rc = hvmop_emulate_realmode( + guest_handle_cast(arg, xen_hvm_emulate_realmode_t)); + break; default: { diff -r 3fd9b0c71b8c xen/arch/x86/hvm/vmx/vmcs.c --- a/xen/arch/x86/hvm/vmx/vmcs.c Tue Mar 20 17:36:18 2007 +0000 +++ 
@@ -493,6 +493,142 @@ void vm_resume_fail(unsigned long eflags
     domain_crash_synchronous();
 }
 
+static int vmx_em_read(enum x86_segment seg,
+                       unsigned long offset,
+                       unsigned long *val,
+                       unsigned int bytes,
+                       struct x86_emulate_ctxt *ctxt)
+{
+    /* FIXME deal with non-flat segments */
+    if ( hvm_copy_from_guest_virt(val, offset, bytes) )
+        return -1;
+    return 0;
+}
+
+static int vmx_em_fetch(enum x86_segment seg,
+                        unsigned long offset,
+                        unsigned long *val,
+                        unsigned int bytes,
+                        struct x86_emulate_ctxt *ctxt)
+{
+    /* FIXME deal with non-flat segments */
+    if ( hvm_copy_from_guest_virt(val, offset, bytes) )
+        return -1;
+    return 0;
+}
+
+static int vmx_em_write(enum x86_segment seg,
+                        unsigned long offset,
+                        unsigned long val,
+                        unsigned int bytes,
+                        struct x86_emulate_ctxt *ctxt)
+{
+    /* FIXME deal with non-flat segments */
+    if ( hvm_copy_to_guest_virt(offset, &val, bytes) )
+        return -1;
+    return 0;
+}
+
+static int vmx_em_read_io(unsigned int port,
+                          unsigned int bytes,
+                          unsigned long *val,
+                          struct x86_emulate_ctxt *ctxt)
+{
+    /* FIXME we need a smarter interface in x86_emulate since we cannot block
+       here */
+    return -1;
+}
+
+static int vmx_em_write_io(unsigned int port,
+                           unsigned int bytes,
+                           unsigned long val,
+                           struct x86_emulate_ctxt *ctxt)
+{
+    send_pio_req(port, 1, bytes, val, IOREQ_WRITE, 0, 0);
+    return 0;
+}
+
+static int vmx_em_read_cr(unsigned int ret,
+                          unsigned long *val,
+                          struct x86_emulate_ctxt *ctxt)
+{
+    struct vcpu *v = current;
+
+    switch (ret) {
+    case 0:
+        *val = v->arch.hvm_vmx.cpu_shadow_cr0;
+        break;
+    case 2:
+        *val = v->arch.hvm_vmx.cpu_cr2;
+        break;
+    case 3:
+        *val = v->arch.hvm_vmx.cpu_cr3;
+        break;
+    case 4:
+        *val = v->arch.hvm_vmx.cpu_shadow_cr4;
+        break;
+    default:
+        return -1;
+    }
+    return 0;
+}
+
+static int vmx_em_write_cr(unsigned int ret,
+                           unsigned long val,
+                           struct x86_emulate_ctxt *ctxt)
+{
+    extern int vmx_set_cr0(unsigned long value);
+
+    if (ret != 0)
+        return -1;
+
+    return vmx_set_cr0(val);
+}
+
+static int vmx_em_write_rflags(unsigned long val,
+                               struct x86_emulate_ctxt *ctxt)
+{
+    ctxt->regs->eflags = val;
+    return 0;
+}
+
+struct x86_emulate_ops em_ops = {
+    .read = vmx_em_read,
+    .write = vmx_em_write,
+    .insn_fetch = vmx_em_fetch,
+    .read_io = vmx_em_read_io,
+    .write_io = vmx_em_write_io,
+    .read_cr = vmx_em_read_cr,
+    .write_cr = vmx_em_write_cr,
+    .write_rflags = vmx_em_write_rflags,
+};
+
+static void vmx_do_emulate(struct vcpu *v)
+{
+    struct x86_emulate_ctxt ctxt;
+    struct cpu_user_regs *regs = &v->arch.guest_context.user_regs;
+
+    hvm_store_cpu_guest_regs(v, regs, NULL);
+    ctxt.regs = regs;
+
+    /* FIXME determine this dynamically */
+    ctxt.addr_size = 16;
+    ctxt.sp_size = 16;
+
+    while (!hypercall_preempt_check()) {
+        if (x86_emulate(&ctxt, &em_ops)) {
+            unsigned long eip;
+
+            eip = __vmread(GUEST_RIP);
+            printk("failed to emulate instruction at %%eip = 0x%lx\n", eip);
+            domain_crash_synchronous();
+        }
+    }
+    hvm_load_cpu_guest_regs(v, regs);
+    /* FIXME how can we ensure we loop here without taking all CPU? */
+    domain_crash_synchronous();
+}
+
 void arch_vmx_do_resume(struct vcpu *v)
 {
     if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
@@ -508,7 +644,11 @@ void arch_vmx_do_resume(struct vcpu *v)
     }
 
     hvm_do_resume(v);
-    reset_stack_and_jump(vmx_asm_do_vmentry);
+
+    if (v->arch.hvm_vmx.emulating)
+        vmx_do_emulate(v);
+    else
+        reset_stack_and_jump(vmx_asm_do_vmentry);
 }
 
 /* Dump a section of VMCS */
diff -r 3fd9b0c71b8c xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Mar 20 17:36:18 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Sun Mar 25 12:10:46 2007 -0500
@@ -1865,7 +1865,7 @@ static int vmx_assist(struct vcpu *v, in
     return 0;
 }
 
-static int vmx_set_cr0(unsigned long value)
+int vmx_set_cr0(unsigned long value)
 {
     struct vcpu *v = current;
     unsigned long mfn;
@@ -1982,13 +1982,29 @@ static int vmx_set_cr0(unsigned long val
             }
         }
 
-        if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
+        if ( v->arch.hvm_vcpu.emulate_realmode )
+        {
+            eip = __vmread(GUEST_RIP);
+            HVM_DBG_LOG(DBG_LEVEL_1,
+                        "Transfering control to x86_emulate %%eip 0x%lx\n", eip);
+            v->arch.hvm_vmx.emulating = 1;
+            return 1;
+        }
+        else if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
         {
             eip = __vmread(GUEST_RIP);
             HVM_DBG_LOG(DBG_LEVEL_1,
                         "Transfering control to vmxassist %%eip 0x%lx\n", eip);
             return 0; /* do not update eip! */
         }
+    }
+    else if ( v->arch.hvm_vmx.emulating )
+    {
+        eip = __vmread(GUEST_RIP);
+        HVM_DBG_LOG(DBG_LEVEL_1,
+                    "Enabling CR0.PE at %%eip 0x%lx\n", eip);
+        v->arch.hvm_vmx.emulating = 0;
+        return 1;
     }
     else if ( v->arch.hvm_vmx.vmxassist_enabled )
     {
diff -r 3fd9b0c71b8c xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h	Tue Mar 20 17:36:18 2007 +0000
+++ b/xen/include/asm-x86/hvm/vcpu.h	Fri Mar 23 11:32:33 2007 -0500
@@ -44,6 +44,7 @@ struct hvm_vcpu {
 
     /* Flags */
     int                 flag_dr_dirty;
+    unsigned long       emulate_realmode;
 
     union {
         struct arch_vmx_struct vmx;
diff -r 3fd9b0c71b8c xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Tue Mar 20 17:36:18 2007 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Sun Mar 25 11:52:28 2007 -0500
@@ -77,6 +77,7 @@ struct arch_vmx_struct {
     unsigned long        cpu_cr2; /* save CR2 */
     unsigned long        cpu_cr3;
     struct vmx_msr_state msr_state;
+    unsigned long        emulating;
     unsigned long        vmxassist_enabled:1;
 };
diff -r 3fd9b0c71b8c xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h	Tue Mar 20 17:36:18 2007 +0000
+++ b/xen/include/public/hvm/hvm_op.h	Fri Mar 23 11:32:33 2007 -0500
@@ -78,4 +78,13 @@ typedef struct xen_hvm_drain_io xen_hvm_
 typedef struct xen_hvm_drain_io xen_hvm_drain_io_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_drain_io_t);
 
+/* Enter into 16 bit emulation */
+#define HVMOP_emulate_realmode 6
+struct xen_hvm_emulate_realmode {
+    /* Should be DOMID_SELF */
+    domid_t domid;
+};
+typedef struct xen_hvm_emulate_realmode xen_hvm_emulate_realmode_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_emulate_realmode_t);
+
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */