[Xen-changelog] [xen-unstable] x86 hvm: More emulation simplifications.
# HG changeset patch # User Keir Fraser <keir.fraser@xxxxxxxxxx> # Date 1203677547 0 # Node ID 8338290757c56c507d6e705e5dee3510fccdc484 # Parent b21b434b3b1a4630c3d5881c649bbed154f7a815 x86 hvm: More emulation simplifications. Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx> --- xen/arch/x86/hvm/platform.c | 273 -------------------------------- xen/arch/x86/hvm/Makefile | 1 xen/arch/x86/hvm/emulate.c | 29 --- xen/arch/x86/hvm/hvm.c | 35 +++- xen/arch/x86/hvm/intercept.c | 78 --------- xen/arch/x86/hvm/io.c | 341 ++++++++++++++++++++++++++++------------ xen/arch/x86/hvm/svm/emulate.c | 13 + xen/arch/x86/hvm/svm/svm.c | 2 xen/arch/x86/hvm/vmx/realmode.c | 3 xen/include/asm-x86/hvm/io.h | 56 ------ xen/include/asm-x86/hvm/vcpu.h | 5 11 files changed, 297 insertions(+), 539 deletions(-) diff -r b21b434b3b1a -r 8338290757c5 xen/arch/x86/hvm/Makefile --- a/xen/arch/x86/hvm/Makefile Fri Feb 22 10:07:35 2008 +0000 +++ b/xen/arch/x86/hvm/Makefile Fri Feb 22 10:52:27 2008 +0000 @@ -9,7 +9,6 @@ obj-y += iommu.o obj-y += iommu.o obj-y += irq.o obj-y += mtrr.o -obj-y += platform.o obj-y += pmtimer.o obj-y += rtc.o obj-y += hpet.o diff -r b21b434b3b1a -r 8338290757c5 xen/arch/x86/hvm/emulate.c --- a/xen/arch/x86/hvm/emulate.c Fri Feb 22 10:07:35 2008 +0000 +++ b/xen/arch/x86/hvm/emulate.c Fri Feb 22 10:52:27 2008 +0000 @@ -3,7 +3,7 @@ * * HVM instruction emulation. Used for MMIO and VMX real mode. * - * Copyright (c) 2008 Citrix Systems, Inc. + * Copyright (c) 2008, Citrix Systems, Inc. * * Authors: * Keir Fraser <keir.fraser@xxxxxxxxxx> @@ -310,18 +310,9 @@ static int hvmemul_rep_ins( if ( curr->arch.hvm_vcpu.io_in_progress ) return X86EMUL_UNHANDLEABLE; - if ( !curr->arch.hvm_vcpu.io_completed ) - { - curr->arch.hvm_vcpu.io_in_progress = 1; - send_pio_req(src_port, *reps, bytes_per_rep, - gpa, IOREQ_READ, - !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1); - } - - if ( !curr->arch.hvm_vcpu.io_completed ) - return X86EMUL_RETRY; - - curr->arch.hvm_vcpu.io_completed = 0; + curr->arch.hvm_vcpu.io_in_progress = 1; + send_pio_req(src_port, *reps, bytes_per_rep, gpa, IOREQ_READ, + !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1); return X86EMUL_OKAY; } @@ -408,18 +399,10 @@ static int hvmemul_rep_movs( (void)gfn_to_mfn_current(sgpa >> PAGE_SHIFT, &p2mt); if ( !p2m_is_ram(p2mt) ) { - if ( !curr->arch.hvm_vcpu.io_completed ) - { - curr->arch.hvm_vcpu.io_in_progress = 1; - send_mmio_req(IOREQ_TYPE_COPY, sgpa, *reps, bytes_per_rep, + curr->arch.hvm_vcpu.io_in_progress = 1; + send_mmio_req(IOREQ_TYPE_COPY, sgpa, *reps, bytes_per_rep, dgpa, IOREQ_READ, !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1); - } - - if ( !curr->arch.hvm_vcpu.io_completed ) - return X86EMUL_RETRY; - - curr->arch.hvm_vcpu.io_completed = 0; } else { diff -r b21b434b3b1a -r 8338290757c5 xen/arch/x86/hvm/hvm.c --- a/xen/arch/x86/hvm/hvm.c Fri Feb 22 10:07:35 2008 +0000 +++ b/xen/arch/x86/hvm/hvm.c Fri Feb 22 10:52:27 2008 +0000 @@ -3,7 +3,8 @@ * * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2005, International Business Machines Corporation. - * + * Copyright (c) 2008, Citrix Systems, Inc. + * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. 
@@ -1517,6 +1518,38 @@ enum hvm_copy_result hvm_fetch_from_gues return __hvm_copy(buf, vaddr, size, 0, 1, hvm_nx_enabled(current)); } +DEFINE_PER_CPU(int, guest_handles_in_xen_space); + +/* Note that copy_{to,from}_user_hvm require the PTE to be writable even + when they're only trying to read from it. The guest is expected to + deal with this. */ +unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len) +{ + int rc; + + if ( this_cpu(guest_handles_in_xen_space) ) + { + memcpy(to, from, len); + return 0; + } + + rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from, len); + return rc ? len : 0; /* fake a copy_to_user() return code */ +} + +unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len) +{ + int rc; + + if ( this_cpu(guest_handles_in_xen_space) ) + { + memcpy(to, from, len); + return 0; + } + + rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len); + return rc ? len : 0; /* fake a copy_from_user() return code */ +} /* HVM specific printbuf. Mostly used for hvmloader chit-chat. */ void hvm_print_line(struct vcpu *v, const char c) diff -r b21b434b3b1a -r 8338290757c5 xen/arch/x86/hvm/intercept.c --- a/xen/arch/x86/hvm/intercept.c Fri Feb 22 10:07:35 2008 +0000 +++ b/xen/arch/x86/hvm/intercept.c Fri Feb 22 10:52:27 2008 +0000 @@ -2,6 +2,7 @@ * intercept.c: Handle performance critical I/O packets in hypervisor space * * Copyright (c) 2004, Intel Corporation. + * Copyright (c) 2008, Citrix Systems, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -93,83 +94,6 @@ static inline void hvm_mmio_access(struc } } -int hvm_buffered_io_send(ioreq_t *p) -{ - struct vcpu *v = current; - struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq; - buffered_iopage_t *pg = iorp->va; - buf_ioreq_t bp; - /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */ - int qw = 0; - - /* Ensure buffered_iopage fits in a page */ - BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE); - - /* - * Return 0 for the cases we can't deal with: - * - 'addr' is only a 20-bit field, so we cannot address beyond 1MB - * - we cannot buffer accesses to guest memory buffers, as the guest - * may expect the memory buffer to be synchronously accessed - * - the count field is usually used with data_is_ptr and since we don't - * support data_is_ptr we do not waste space for the count field either - */ - if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) ) - return 0; - - bp.type = p->type; - bp.dir = p->dir; - switch ( p->size ) - { - case 1: - bp.size = 0; - break; - case 2: - bp.size = 1; - break; - case 4: - bp.size = 2; - break; - case 8: - bp.size = 3; - qw = 1; - break; - default: - gdprintk(XENLOG_WARNING, "unexpected ioreq size:%"PRId64"\n", p->size); - return 0; - } - - bp.data = p->data; - bp.addr = p->addr; - - spin_lock(&iorp->lock); - - if ( (pg->write_pointer - pg->read_pointer) >= - (IOREQ_BUFFER_SLOT_NUM - qw) ) - { - /* The queue is full: send the iopacket through the normal path. */ - spin_unlock(&iorp->lock); - return 0; - } - - memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM], - &bp, sizeof(bp)); - - if ( qw ) - { - bp.data = p->data >> 32; - memcpy(&pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM], - &bp, sizeof(bp)); - } - - /* Make the ioreq_t visible /before/ write_pointer. */ - wmb(); - pg->write_pointer += qw ? 
2 : 1; - - spin_unlock(&iorp->lock); - - return 1; -} - int hvm_mmio_intercept(ioreq_t *p) { struct vcpu *v = current; diff -r b21b434b3b1a -r 8338290757c5 xen/arch/x86/hvm/io.c --- a/xen/arch/x86/hvm/io.c Fri Feb 22 10:07:35 2008 +0000 +++ b/xen/arch/x86/hvm/io.c Fri Feb 22 10:52:27 2008 +0000 @@ -3,6 +3,7 @@ * * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2005, International Business Machines Corporation. + * Copyright (c) 2008, Citrix Systems, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -25,7 +26,6 @@ #include <xen/errno.h> #include <xen/trace.h> #include <xen/event.h> - #include <xen/hypercall.h> #include <asm/current.h> #include <asm/cpufeature.h> @@ -41,79 +41,246 @@ #include <asm/hvm/vpic.h> #include <asm/hvm/vlapic.h> #include <asm/hvm/trace.h> - +#include <asm/hvm/emulate.h> #include <public/sched.h> #include <xen/iocap.h> #include <public/hvm/ioreq.h> -static void hvm_pio_assist( - struct cpu_user_regs *regs, ioreq_t *p, struct hvm_io_op *pio_opp) -{ - if ( p->data_is_ptr || (pio_opp->flags & OVERLAP) ) - { - int sign = p->df ? -1 : 1; - - if ( pio_opp->flags & REPZ ) - regs->ecx -= p->count; - - if ( p->dir == IOREQ_READ ) - { - if ( pio_opp->flags & OVERLAP ) - { - unsigned long addr = pio_opp->addr; - if ( hvm_paging_enabled(current) ) - { - int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size); - if ( rv == HVMCOPY_bad_gva_to_gfn ) - return; /* exception already injected */ - } - else - (void)hvm_copy_to_guest_phys(addr, &p->data, p->size); - } - regs->edi += sign * p->count * p->size; - } - else /* p->dir == IOREQ_WRITE */ - { - ASSERT(p->dir == IOREQ_WRITE); - regs->esi += sign * p->count * p->size; - } - } - else if ( p->dir == IOREQ_READ ) - { - unsigned long old_eax = regs->eax; - - switch ( p->size ) - { - case 1: - regs->eax = (old_eax & ~0xff) | (p->data & 0xff); - break; - case 2: - regs->eax = (old_eax & ~0xffff) | (p->data & 0xffff); - break; - case 4: - regs->eax = (p->data & 0xffffffff); - break; - default: - printk("Error: %s unknown port size\n", __FUNCTION__); - domain_crash_synchronous(); - } - HVMTRACE_1D(IO_ASSIST, current, p->data); - } -} - -void hvm_io_assist(void) -{ +int hvm_buffered_io_send(ioreq_t *p) +{ + struct vcpu *v = current; + struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq; + buffered_iopage_t *pg = iorp->va; + buf_ioreq_t bp; + /* Timeoffset sends 64b data, but no address. Use two consecutive slots. 
*/ + int qw = 0; + + /* Ensure buffered_iopage fits in a page */ + BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE); + + /* + * Return 0 for the cases we can't deal with: + * - 'addr' is only a 20-bit field, so we cannot address beyond 1MB + * - we cannot buffer accesses to guest memory buffers, as the guest + * may expect the memory buffer to be synchronously accessed + * - the count field is usually used with data_is_ptr and since we don't + * support data_is_ptr we do not waste space for the count field either + */ + if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) ) + return 0; + + bp.type = p->type; + bp.dir = p->dir; + switch ( p->size ) + { + case 1: + bp.size = 0; + break; + case 2: + bp.size = 1; + break; + case 4: + bp.size = 2; + break; + case 8: + bp.size = 3; + qw = 1; + break; + default: + gdprintk(XENLOG_WARNING, "unexpected ioreq size:%"PRId64"\n", p->size); + return 0; + } + + bp.data = p->data; + bp.addr = p->addr; + + spin_lock(&iorp->lock); + + if ( (pg->write_pointer - pg->read_pointer) >= + (IOREQ_BUFFER_SLOT_NUM - qw) ) + { + /* The queue is full: send the iopacket through the normal path. */ + spin_unlock(&iorp->lock); + return 0; + } + + memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM], + &bp, sizeof(bp)); + + if ( qw ) + { + bp.data = p->data >> 32; + memcpy(&pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM], + &bp, sizeof(bp)); + } + + /* Make the ioreq_t visible /before/ write_pointer. */ + wmb(); + pg->write_pointer += qw ? 2 : 1; + + spin_unlock(&iorp->lock); + + return 1; +} + +void send_pio_req(unsigned long port, unsigned long count, int size, + paddr_t value, int dir, int df, int value_is_ptr) +{ + struct vcpu *v = current; + vcpu_iodata_t *vio = get_ioreq(v); + ioreq_t *p = &vio->vp_ioreq; + + if ( p->state != STATE_IOREQ_NONE ) + gdprintk(XENLOG_WARNING, + "WARNING: send pio with something already pending (%d)?\n", + p->state); + + p->dir = dir; + p->data_is_ptr = value_is_ptr; + p->type = IOREQ_TYPE_PIO; + p->size = size; + p->addr = port; + p->count = count; + p->df = df; + p->data = value; + p->io_count++; + + if ( hvm_portio_intercept(p) ) + { + p->state = STATE_IORESP_READY; + hvm_io_assist(); + } + else + { + hvm_send_assist_req(v); + } +} + +void send_mmio_req(unsigned char type, paddr_t gpa, + unsigned long count, int size, paddr_t value, + int dir, int df, int value_is_ptr) +{ + struct vcpu *v = current; + vcpu_iodata_t *vio = get_ioreq(v); + ioreq_t *p = &vio->vp_ioreq; + + if ( p->state != STATE_IOREQ_NONE ) + gdprintk(XENLOG_WARNING, + "WARNING: send mmio with something already pending (%d)?\n", + p->state); + + p->dir = dir; + p->data_is_ptr = value_is_ptr; + p->type = type; + p->size = size; + p->addr = gpa; + p->count = count; + p->df = df; + p->data = value; + p->io_count++; + + if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) ) + { + p->state = STATE_IORESP_READY; + hvm_io_assist(); + } + else + { + hvm_send_assist_req(v); + } +} + +void send_timeoffset_req(unsigned long timeoff) +{ + ioreq_t p[1]; + + if ( timeoff == 0 ) + return; + + memset(p, 0, sizeof(*p)); + + p->type = IOREQ_TYPE_TIMEOFFSET; + p->size = 8; + p->count = 1; + p->dir = IOREQ_WRITE; + p->data = timeoff; + + p->state = STATE_IOREQ_READY; + + if ( !hvm_buffered_io_send(p) ) + printk("Unsuccessful timeoffset update\n"); +} + +/* Ask ioemu mapcache to invalidate mappings. 
*/ +void send_invalidate_req(void) +{ + struct vcpu *v = current; vcpu_iodata_t *vio; ioreq_t *p; - struct cpu_user_regs *regs; - struct hvm_io_op *io_opp; - struct vcpu *v = current; - - io_opp = &v->arch.hvm_vcpu.io_op; - regs = &io_opp->io_context; - vio = get_ioreq(v); + + vio = get_ioreq(v); + if ( vio == NULL ) + { + printk("bad shared page: %lx\n", (unsigned long) vio); + domain_crash_synchronous(); + } p = &vio->vp_ioreq; + if ( p->state != STATE_IOREQ_NONE ) + printk("WARNING: send invalidate req with something " + "already pending (%d)?\n", p->state); + + p->type = IOREQ_TYPE_INVALIDATE; + p->size = 4; + p->dir = IOREQ_WRITE; + p->data = ~0UL; /* flush all */ + p->io_count++; + + hvm_send_assist_req(v); +} + +int handle_mmio(void) +{ + struct hvm_emulate_ctxt ctxt; + struct vcpu *curr = current; + int rc; + + hvm_emulate_prepare(&ctxt, guest_cpu_user_regs()); + + rc = hvm_emulate_one(&ctxt); + + switch ( rc ) + { + case X86EMUL_UNHANDLEABLE: + gdprintk(XENLOG_WARNING, + "MMIO emulation failed @ %04x:%lx: " + "%02x %02x %02x %02x %02x %02x\n", + hvmemul_get_seg_reg(x86_seg_cs, &ctxt)->sel, + ctxt.insn_buf_eip, + ctxt.insn_buf[0], ctxt.insn_buf[1], + ctxt.insn_buf[2], ctxt.insn_buf[3], + ctxt.insn_buf[4], ctxt.insn_buf[5]); + return 0; + case X86EMUL_EXCEPTION: + if ( ctxt.flags.exn_pending ) + hvm_inject_exception(ctxt.exn_vector, 0, 0); + break; + default: + break; + } + + hvm_emulate_writeback(&ctxt); + + curr->arch.hvm_vcpu.mmio_in_progress = curr->arch.hvm_vcpu.io_in_progress; + + return 1; +} + +void hvm_io_assist(void) +{ + struct vcpu *v = current; + ioreq_t *p = &get_ioreq(v)->vp_ioreq; + if ( p->state != STATE_IORESP_READY ) { gdprintk(XENLOG_ERR, "Unexpected HVM iorequest state %d.\n", p->state); @@ -128,34 +295,14 @@ void hvm_io_assist(void) if ( v->arch.hvm_vcpu.io_in_progress ) { v->arch.hvm_vcpu.io_in_progress = 0; - if ( p->dir == IOREQ_READ ) + if ( (p->dir == IOREQ_READ) && !p->data_is_ptr ) { v->arch.hvm_vcpu.io_completed = 1; v->arch.hvm_vcpu.io_data = p->data; - } - if ( v->arch.hvm_vcpu.mmio_in_progress ) - (void)handle_mmio(); - goto out; - } - - switch ( p->type ) - { - case IOREQ_TYPE_INVALIDATE: - goto out; - case IOREQ_TYPE_PIO: - hvm_pio_assist(regs, p, io_opp); - break; - default: - gdprintk(XENLOG_ERR, "Unexpected HVM iorequest state %d.\n", p->state); - domain_crash(v->domain); - goto out; - } - - /* Copy register changes back into current guest state. 
*/ - regs->eflags &= ~X86_EFLAGS_RF; - memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES); - if ( regs->eflags & X86_EFLAGS_TF ) - hvm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0); + if ( v->arch.hvm_vcpu.mmio_in_progress ) + (void)handle_mmio(); + } + } out: vcpu_end_shutdown_deferral(v); @@ -173,13 +320,13 @@ void dpci_ioport_read(uint32_t mport, io switch ( p->size ) { - case BYTE: + case 1: z_data = (uint64_t)inb(mport); break; - case WORD: + case 2: z_data = (uint64_t)inw(mport); break; - case LONG: + case 4: z_data = (uint64_t)inl(mport); break; default: @@ -218,13 +365,13 @@ void dpci_ioport_write(uint32_t mport, i switch ( p->size ) { - case BYTE: + case 1: outb((uint8_t) z_data, mport); break; - case WORD: + case 2: outw((uint16_t) z_data, mport); break; - case LONG: + case 4: outl((uint32_t) z_data, mport); break; default: diff -r b21b434b3b1a -r 8338290757c5 xen/arch/x86/hvm/platform.c --- a/xen/arch/x86/hvm/platform.c Fri Feb 22 10:07:35 2008 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,273 +0,0 @@ -/* - * platform.c: handling x86 platform related MMIO instructions - * - * Copyright (c) 2004, Intel Corporation. - * Copyright (c) 2005, International Business Machines Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - */ - -#include <xen/config.h> -#include <xen/types.h> -#include <xen/mm.h> -#include <xen/domain_page.h> -#include <asm/page.h> -#include <xen/event.h> -#include <xen/trace.h> -#include <xen/sched.h> -#include <asm/regs.h> -#include <asm/x86_emulate.h> -#include <asm/paging.h> -#include <asm/hvm/hvm.h> -#include <asm/hvm/support.h> -#include <asm/hvm/io.h> -#include <public/hvm/ioreq.h> -#include <xen/lib.h> -#include <xen/sched.h> -#include <asm/hvm/emulate.h> - -int inst_copy_from_guest( - unsigned char *buf, unsigned long guest_eip, int inst_len) -{ - if ( inst_len > MAX_INST_LEN || inst_len <= 0 ) - return 0; - if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len) ) - return 0; - return inst_len; -} - -void send_pio_req(unsigned long port, unsigned long count, int size, - paddr_t value, int dir, int df, int value_is_ptr) -{ - struct vcpu *v = current; - vcpu_iodata_t *vio; - ioreq_t *p; - - if ( size == 0 || count == 0 ) { - printk("null pio request? 
port %lx, count %lx, " - "size %d, value %"PRIpaddr", dir %d, value_is_ptr %d.\n", - port, count, size, value, dir, value_is_ptr); - } - - vio = get_ioreq(v); - if ( vio == NULL ) { - printk("bad shared page: %lx\n", (unsigned long) vio); - domain_crash_synchronous(); - } - - p = &vio->vp_ioreq; - if ( p->state != STATE_IOREQ_NONE ) - printk("WARNING: send pio with something already pending (%d)?\n", - p->state); - - p->dir = dir; - p->data_is_ptr = value_is_ptr; - - p->type = IOREQ_TYPE_PIO; - p->size = size; - p->addr = port; - p->count = count; - p->df = df; - - p->io_count++; - - p->data = value; - - if ( hvm_portio_intercept(p) ) - { - p->state = STATE_IORESP_READY; - hvm_io_assist(); - return; - } - - hvm_send_assist_req(v); -} - -void send_mmio_req(unsigned char type, paddr_t gpa, - unsigned long count, int size, paddr_t value, - int dir, int df, int value_is_ptr) -{ - struct vcpu *v = current; - vcpu_iodata_t *vio; - ioreq_t *p; - - if ( size == 0 || count == 0 ) { - printk("null mmio request? type %d, gpa %"PRIpaddr", " - "count %lx, size %d, value %"PRIpaddr", dir %d, " - "value_is_ptr %d.\n", - type, gpa, count, size, value, dir, value_is_ptr); - } - - vio = get_ioreq(v); - if (vio == NULL) { - printk("bad shared page\n"); - domain_crash_synchronous(); - } - - p = &vio->vp_ioreq; - - if ( p->state != STATE_IOREQ_NONE ) - printk("WARNING: send mmio with something already pending (%d)?\n", - p->state); - p->dir = dir; - p->data_is_ptr = value_is_ptr; - - p->type = type; - p->size = size; - p->addr = gpa; - p->count = count; - p->df = df; - - p->io_count++; - - p->data = value; - - if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) ) - { - p->state = STATE_IORESP_READY; - hvm_io_assist(); - return; - } - - hvm_send_assist_req(v); -} - -void send_timeoffset_req(unsigned long timeoff) -{ - ioreq_t p[1]; - - if ( timeoff == 0 ) - return; - - memset(p, 0, sizeof(*p)); - - p->type = IOREQ_TYPE_TIMEOFFSET; - p->size = 8; - p->count = 1; - p->dir = IOREQ_WRITE; - p->data = timeoff; - - p->state = STATE_IOREQ_READY; - - if ( !hvm_buffered_io_send(p) ) - printk("Unsuccessful timeoffset update\n"); -} - -/* Ask ioemu mapcache to invalidate mappings. 
*/ -void send_invalidate_req(void) -{ - struct vcpu *v = current; - vcpu_iodata_t *vio; - ioreq_t *p; - - vio = get_ioreq(v); - if ( vio == NULL ) - { - printk("bad shared page: %lx\n", (unsigned long) vio); - domain_crash_synchronous(); - } - - p = &vio->vp_ioreq; - if ( p->state != STATE_IOREQ_NONE ) - printk("WARNING: send invalidate req with something " - "already pending (%d)?\n", p->state); - - p->type = IOREQ_TYPE_INVALIDATE; - p->size = 4; - p->dir = IOREQ_WRITE; - p->data = ~0UL; /* flush all */ - p->io_count++; - - hvm_send_assist_req(v); -} - -int handle_mmio(void) -{ - struct hvm_emulate_ctxt ctxt; - struct vcpu *curr = current; - int rc; - - hvm_emulate_prepare(&ctxt, guest_cpu_user_regs()); - - rc = hvm_emulate_one(&ctxt); - - switch ( rc ) - { - case X86EMUL_UNHANDLEABLE: - gdprintk(XENLOG_WARNING, - "MMIO emulation failed @ %04x:%lx: " - "%02x %02x %02x %02x %02x %02x\n", - hvmemul_get_seg_reg(x86_seg_cs, &ctxt)->sel, - ctxt.insn_buf_eip, - ctxt.insn_buf[0], ctxt.insn_buf[1], - ctxt.insn_buf[2], ctxt.insn_buf[3], - ctxt.insn_buf[4], ctxt.insn_buf[5]); - return 0; - case X86EMUL_EXCEPTION: - if ( ctxt.flags.exn_pending ) - hvm_inject_exception(ctxt.exn_vector, 0, 0); - break; - default: - break; - } - - hvm_emulate_writeback(&ctxt); - - curr->arch.hvm_vcpu.mmio_in_progress = curr->arch.hvm_vcpu.io_in_progress; - - return 1; -} - -DEFINE_PER_CPU(int, guest_handles_in_xen_space); - -/* Note that copy_{to,from}_user_hvm require the PTE to be writable even - when they're only trying to read from it. The guest is expected to - deal with this. */ -unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len) -{ - int rc; - - if ( this_cpu(guest_handles_in_xen_space) ) - { - memcpy(to, from, len); - return 0; - } - - rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from, len); - return rc ? len : 0; /* fake a copy_to_user() return code */ -} - -unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len) -{ - int rc; - - if ( this_cpu(guest_handles_in_xen_space) ) - { - memcpy(to, from, len); - return 0; - } - - rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len); - return rc ? 
len : 0; /* fake a copy_from_user() return code */ -} - -/* - * Local variables: - * mode: C - * c-set-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff -r b21b434b3b1a -r 8338290757c5 xen/arch/x86/hvm/svm/emulate.c --- a/xen/arch/x86/hvm/svm/emulate.c Fri Feb 22 10:07:35 2008 +0000 +++ b/xen/arch/x86/hvm/svm/emulate.c Fri Feb 22 10:52:27 2008 +0000 @@ -27,8 +27,17 @@ #include <asm/hvm/svm/vmcb.h> #include <asm/hvm/svm/emulate.h> -int inst_copy_from_guest( - unsigned char *buf, unsigned long guest_eip, int inst_len); +#define MAX_INST_LEN 15 + +static int inst_copy_from_guest( + unsigned char *buf, unsigned long guest_eip, int inst_len) +{ + if ( (inst_len > MAX_INST_LEN) || (inst_len <= 0) ) + return 0; + if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len) ) + return 0; + return inst_len; +} static unsigned int is_prefix(u8 opc) { diff -r b21b434b3b1a -r 8338290757c5 xen/arch/x86/hvm/svm/svm.c --- a/xen/arch/x86/hvm/svm/svm.c Fri Feb 22 10:07:35 2008 +0000 +++ b/xen/arch/x86/hvm/svm/svm.c Fri Feb 22 10:52:27 2008 +0000 @@ -58,8 +58,6 @@ u32 svm_feature_flags; enum handler_return { HNDL_done, HNDL_unhandled, HNDL_exception_raised }; -int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, - int inst_len); asmlinkage void do_IRQ(struct cpu_user_regs *); static void svm_update_guest_cr(struct vcpu *v, unsigned int cr); diff -r b21b434b3b1a -r 8338290757c5 xen/arch/x86/hvm/vmx/realmode.c --- a/xen/arch/x86/hvm/vmx/realmode.c Fri Feb 22 10:07:35 2008 +0000 +++ b/xen/arch/x86/hvm/vmx/realmode.c Fri Feb 22 10:52:27 2008 +0000 @@ -221,8 +221,7 @@ void vmx_realmode(struct cpu_user_regs * hvm_emulate_prepare(&rm_ctxt.hvm, regs); rm_ctxt.intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO); - if ( curr->arch.hvm_vcpu.io_in_progress || - curr->arch.hvm_vcpu.io_completed ) + if ( curr->arch.hvm_vcpu.io_completed ) realmode_emulate_one(&rm_ctxt); /* Only deliver interrupts into emulated real mode. 
*/ diff -r b21b434b3b1a -r 8338290757c5 xen/include/asm-x86/hvm/io.h --- a/xen/include/asm-x86/hvm/io.h Fri Feb 22 10:07:35 2008 +0000 +++ b/xen/include/asm-x86/hvm/io.h Fri Feb 22 10:52:27 2008 +0000 @@ -24,61 +24,6 @@ #include <asm/hvm/vioapic.h> #include <public/hvm/ioreq.h> #include <public/event_channel.h> - -#define operand_size(operand) \ - ((operand >> 24) & 0xFF) - -#define operand_index(operand) \ - ((operand >> 16) & 0xFF) - -/* for instruction.operand[].size */ -#define BYTE 1 -#define WORD 2 -#define LONG 4 -#define QUAD 8 -#define BYTE_64 16 - -/* for instruction.operand[].flag */ -#define REGISTER 0x1 -#define MEMORY 0x2 -#define IMMEDIATE 0x4 - -/* for instruction.flags */ -#define REPZ 0x1 -#define REPNZ 0x2 -#define OVERLAP 0x4 - -/* instruction type */ -#define INSTR_PIO 1 -#define INSTR_OR 2 -#define INSTR_AND 3 -#define INSTR_XOR 4 -#define INSTR_CMP 5 -#define INSTR_MOV 6 -#define INSTR_MOVS 7 -#define INSTR_MOVZX 8 -#define INSTR_MOVSX 9 -#define INSTR_STOS 10 -#define INSTR_LODS 11 -#define INSTR_TEST 12 -#define INSTR_BT 13 -#define INSTR_XCHG 14 -#define INSTR_SUB 15 -#define INSTR_ADD 16 -#define INSTR_PUSH 17 - -#define MAX_INST_LEN 15 /* Maximum instruction length = 15 bytes */ - -struct hvm_io_op { - unsigned int instr; /* instruction */ - unsigned int flags; - unsigned long addr; /* virt addr for overlap PIO/MMIO */ - struct { - unsigned int operand[2]; /* operands */ - unsigned long immediate; /* immediate portion */ - }; - struct cpu_user_regs io_context; /* current context */ -}; #define MAX_IO_HANDLER 12 @@ -119,7 +64,6 @@ struct hvm_mmio_handler { hvm_mmio_write_t write_handler; }; -/* global io interception point in HV */ int hvm_io_intercept(ioreq_t *p, int type); int register_io_handler( struct domain *d, unsigned long addr, unsigned long size, diff -r b21b434b3b1a -r 8338290757c5 xen/include/asm-x86/hvm/vcpu.h --- a/xen/include/asm-x86/hvm/vcpu.h Fri Feb 22 10:07:35 2008 +0000 +++ b/xen/include/asm-x86/hvm/vcpu.h Fri Feb 22 10:52:27 2008 +0000 @@ -42,7 +42,6 @@ struct hvm_vcpu { */ unsigned long hw_cr[5]; - struct hvm_io_op io_op; struct vlapic vlapic; s64 cache_tsc_offset; u64 guest_time; @@ -77,9 +76,5 @@ struct hvm_vcpu { unsigned long io_data; }; -#define ARCH_HVM_IO_WAIT 1 /* Waiting for I/O completion */ - -#define HVM_CONTEXT_STACK_BYTES (offsetof(struct cpu_user_regs, ss)) - #endif /* __ASM_X86_HVM_VCPU_H__ */ _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog