[Xen-devel] [PATCH][2/10] Extend the VMX intercept mechanism to include mmio as well as portio.
Extend the VMX intercept mechanism to include mmio as well as portio.

Signed-off-by: Yunhong Jiang <yunhong.jiang@xxxxxxxxx>
Signed-off-by: Xiaofeng Ling <xiaofeng.ling@xxxxxxxxx>
Signed-off-by: Arun Sharma <arun.sharma@xxxxxxxxx>

diff -r febfcd0a1a0a -r 9a43d5c12b95 xen/include/asm-x86/vmx_platform.h
--- a/xen/include/asm-x86/vmx_platform.h	Thu Jun 30 03:20:48 2005
+++ b/xen/include/asm-x86/vmx_platform.h	Thu Jun 30 04:08:50 2005
@@ -88,6 +88,7 @@
 extern void handle_mmio(unsigned long, unsigned long);
 extern void vmx_wait_io(void);
 extern int vmx_setup_platform(struct vcpu *, struct cpu_user_regs *);
+extern void vmx_io_assist(struct vcpu *v);
 
 // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame.
 #define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT)))
diff -r febfcd0a1a0a -r 9a43d5c12b95 xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c	Thu Jun 30 03:20:48 2005
+++ b/xen/arch/x86/vmx.c	Thu Jun 30 04:08:50 2005
@@ -135,17 +135,20 @@
 }
 #endif
 
-    if (!vmx_paging_enabled(current))
+    if (!vmx_paging_enabled(current)){
         handle_mmio(va, va);
-
+        return 1;
+    }
     gpte = gva_to_gpte(va);
 
     if (!(l1e_get_flags(gpte) & _PAGE_PRESENT) )
         return 0;
 
     gpa = l1e_get_paddr(gpte) + (va & ~PAGE_MASK);
 
     /* Use 1:1 page table to identify MMIO address space */
-    if (mmio_space(gpa))
+    if (mmio_space(gpa)){
         handle_mmio(va, gpa);
+        return 1;
+    }
 
     result = shadow_fault(va, regs);
@@ -451,10 +454,9 @@
     p->port_mm = 0;
 
     /* Check if the packet needs to be intercepted */
-    if (vmx_io_intercept(p)) {
+    if (vmx_portio_intercept(p))
         /* no blocking & no evtchn notification */
         return;
-    }
 
     set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
     p->state = STATE_IOREQ_READY;
diff -r febfcd0a1a0a -r 9a43d5c12b95 xen/include/asm-x86/vmx_intercept.h
--- a/xen/include/asm-x86/vmx_intercept.h	Thu Jun 30 03:20:48 2005
+++ b/xen/include/asm-x86/vmx_intercept.h	Thu Jun 30 04:08:50 2005
@@ -1,4 +1,3 @@
-
 #ifndef _VMX_INTERCEPT_H
 #define _VMX_INTERCEPT_H
 
@@ -13,18 +12,45 @@
 
 typedef int (*intercept_action_t)(ioreq_t*);
 
+enum {PORTIO, MMIO};
+
 struct vmx_handler_t {
     int num_slot;
     struct {
         unsigned long addr;
+        int type;
         unsigned long offset;
         intercept_action_t action;
     } hdl_list[MAX_IO_HANDLER];
 };
 
 /* global io interception point in HV */
-extern int vmx_io_intercept(ioreq_t*);
-extern int register_io_handler(unsigned long, unsigned long, intercept_action_t);
+extern int vmx_io_intercept(ioreq_t *p, int type);
+extern int register_io_handler(unsigned long addr, unsigned long offset,
+                               intercept_action_t action, int type);
+
+static inline int vmx_portio_intercept(ioreq_t *p)
+{
+    return vmx_io_intercept(p, PORTIO);
+}
+
+static inline int vmx_mmio_intercept(ioreq_t *p)
+{
+    return vmx_io_intercept(p, MMIO);
+}
+
+static inline int register_portio_handler(unsigned long addr,
+                                          unsigned long offset,
+                                          intercept_action_t action)
+{
+    return register_io_handler(addr, offset, action, PORTIO);
+}
+
+static inline int register_mmio_handler(unsigned long addr,
+                                        unsigned long offset,
+                                        intercept_action_t action)
+{
+    return register_io_handler(addr, offset, action, MMIO);
+}
 
 #endif /* _VMX_INTERCEPT_H */
diff -r febfcd0a1a0a -r 9a43d5c12b95 xen/arch/x86/vmx_platform.c
--- a/xen/arch/x86/vmx_platform.c	Thu Jun 30 03:20:48 2005
+++ b/xen/arch/x86/vmx_platform.c	Thu Jun 30 04:08:50 2005
@@ -606,12 +606,11 @@
     if ((pvalid) && vmx_paging_enabled(current))
         p->u.pdata = (void *) gva_to_gpa(p->u.data);
 
-#if 0
-    printf("send_mmio_req: eip 0x%lx:0x%lx, dir %d, pdata_valid %d, ",
-           inst_decoder_regs->cs, inst_decoder_regs->eip, p->dir, p->pdata_valid);
-    printf("port_mm %d, size %lld, addr 0x%llx, value 0x%lx, count %lld\n",
-           p->port_mm, p->size, p->addr, value, p->count);
-#endif
+    if (vmx_mmio_intercept(p)){
+        p->state = STATE_IORESP_READY;
+        vmx_io_assist(d);
+        return;
+    }
 
     evtchn_send(iopacket_port(d->domain));
     vmx_wait_io();
@@ -709,6 +708,7 @@
         // Send the request and waiting for return value.
         mpci_p->mmio_target = mmio_inst.operand[1];
         send_mmio_req(gpa, &mmio_inst, value, IOREQ_READ, 0);
+        return;
     } else {
         // Write to MMIO
         if (mmio_inst.operand[0] & IMMEDIATE) {
@@ -728,6 +728,7 @@
     if (!strncmp((char *)mmio_inst.i_name, "stos", 4)) {
         send_mmio_req(gpa, &mmio_inst,
                       inst_decoder_regs->eax, IOREQ_WRITE, 0);
+        return;
     }
 
     domain_crash_synchronous();
diff -r febfcd0a1a0a -r 9a43d5c12b95 xen/arch/x86/vmx_intercept.c
--- a/xen/arch/x86/vmx_intercept.c	Thu Jun 30 03:20:48 2005
+++ b/xen/arch/x86/vmx_intercept.c	Thu Jun 30 04:08:50 2005
@@ -31,14 +31,17 @@
 
 #ifdef CONFIG_VMX
 
-/* for intercepting io request after vm_exit, return value: 0--not handle; 1--handled */
-int vmx_io_intercept(ioreq_t *p)
+/* Check if the request is handled inside xen
+   return value: 0 --not handled; 1 --handled */
+int vmx_io_intercept(ioreq_t *p, int type)
 {
     struct vcpu *d = current;
     struct vmx_handler_t *handler = &(d->domain->arch.vmx_platform.vmx_handler);
     int i;
     unsigned long addr, offset;
 
     for (i = 0; i < handler->num_slot; i++) {
+        if( type != handler->hdl_list[i].type)
+            continue;
         addr = handler->hdl_list[i].addr;
         offset = handler->hdl_list[i].offset;
         if (p->addr >= addr &&
@@ -48,7 +51,8 @@
     return 0;
 }
 
-int register_io_handler(unsigned long addr, unsigned long offset, intercept_action_t action)
+int register_io_handler(unsigned long addr, unsigned long offset,
+                        intercept_action_t action, int type)
 {
     struct vcpu *d = current;
     struct vmx_handler_t *handler = &(d->domain->arch.vmx_platform.vmx_handler);
@@ -62,6 +66,7 @@
     handler->hdl_list[num].addr = addr;
     handler->hdl_list[num].offset = offset;
     handler->hdl_list[num].action = action;
+    handler->hdl_list[num].type = type;
     handler->num_slot++;
 
     return 1;
@@ -262,7 +267,7 @@
         p->state = STATE_IORESP_READY;
 
         /* register handler to intercept the PIT io when vm_exit */
-        register_io_handler(0x40, 4, intercept_pit_io);
+        register_portio_handler(0x40, 4, intercept_pit_io);
     }
 }
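
For readers outside the Xen tree, the core of the change is a type tag on each slot of the shared handler table, so one linear scan can serve both port I/O and MMIO requests. The standalone C sketch below illustrates that dispatch pattern under stated assumptions: the two-field ioreq_t, io_intercept(), register_handler() and fake_pit_io() are hypothetical simplifications for illustration, not Xen's real definitions, and the range check is modeled on the partially visible condition in the vmx_io_intercept() hunk above.

#include <stdio.h>

#define MAX_IO_HANDLER 8

/* Simplified stand-in for Xen's ioreq_t: just an address and a size. */
typedef struct { unsigned long addr; unsigned long size; } ioreq_t;

typedef int (*intercept_action_t)(ioreq_t *);

enum { PORTIO, MMIO };  /* same tag values the patch introduces */

static struct {
    int num_slot;
    struct {
        unsigned long      addr;    /* start of the intercepted range */
        int                type;    /* PORTIO or MMIO */
        unsigned long      offset;  /* length of the range */
        intercept_action_t action;  /* in-hypervisor handler */
    } hdl_list[MAX_IO_HANDLER];
} handler;

/* One linear scan serves both kinds of request: entries of the wrong
 * type are skipped, then the request must fall entirely inside the
 * registered [addr, addr + offset) range to be handled here. */
static int io_intercept(ioreq_t *p, int type)
{
    int i;

    for (i = 0; i < handler.num_slot; i++) {
        if (type != handler.hdl_list[i].type)
            continue;
        if (p->addr >= handler.hdl_list[i].addr &&
            p->addr + p->size <=
                handler.hdl_list[i].addr + handler.hdl_list[i].offset)
            return handler.hdl_list[i].action(p);
    }
    return 0;  /* not handled; a real caller would forward to the device model */
}

static int register_handler(unsigned long addr, unsigned long offset,
                            intercept_action_t action, int type)
{
    int num = handler.num_slot;

    if (num >= MAX_IO_HANDLER)
        return 0;
    handler.hdl_list[num].addr   = addr;
    handler.hdl_list[num].offset = offset;
    handler.hdl_list[num].action = action;
    handler.hdl_list[num].type   = type;
    handler.num_slot++;
    return 1;
}

static int fake_pit_io(ioreq_t *p)
{
    printf("port 0x%lx handled inside the hypervisor\n", p->addr);
    return 1;
}

int main(void)
{
    ioreq_t req = { 0x40, 1 };  /* one byte at PIT port 0x40 */

    register_handler(0x40, 4, fake_pit_io, PORTIO);
    printf("portio lookup: %d\n", io_intercept(&req, PORTIO));  /* 1: handled */
    printf("mmio lookup:   %d\n", io_intercept(&req, MMIO));    /* 0: wrong type */
    return 0;
}

Run standalone, this prints a hit for the PORTIO lookup and a miss for the MMIO one, which is exactly the filtering that the new "type != handler->hdl_list[i].type" check provides. The same tagging is what lets send_mmio_req() complete an intercepted MMIO request in place with vmx_io_assist() and return immediately, rather than signalling the event channel and blocking in vmx_wait_io().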