[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] [HVM] Clean up IOREQ state management and evtchn notifications.
# HG changeset patch # User kfraser@xxxxxxxxxxxxxxxxxxxxx # Node ID 6c975e64271968baf94672e2d5df2385299ca47d # Parent 7968f02ede38c965f68d649698637df7441fb003 [HVM] Clean up IOREQ state managemnet and evtchn notifications. Based on a patch from Xin Li <xin.b.li@xxxxxxxxx>. Changed significantly on the Xen side -- not as cut down as the original patch: this one keeps the xen_event_channel 'API' unchanged. Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx> --- tools/ioemu/target-i386-dm/cpu.h | 2 - tools/ioemu/target-i386-dm/helper2.c | 13 ++------- xen/arch/ia64/vmx/vmx_support.c | 4 +- xen/arch/x86/hvm/hvm.c | 48 ++++++++++++++++++----------------- xen/arch/x86/hvm/io.c | 34 +++++++++++------------- xen/arch/x86/hvm/platform.c | 6 ++-- xen/include/public/hvm/ioreq.h | 2 - 7 files changed, 51 insertions(+), 58 deletions(-) diff -r 7968f02ede38 -r 6c975e642719 tools/ioemu/target-i386-dm/cpu.h --- a/tools/ioemu/target-i386-dm/cpu.h Thu Nov 09 16:50:44 2006 +0000 +++ b/tools/ioemu/target-i386-dm/cpu.h Thu Nov 09 17:23:58 2006 +0000 @@ -55,8 +55,6 @@ typedef struct CPUX86State { int interrupt_request; CPU_COMMON - - int send_event; } CPUX86State; CPUX86State *cpu_x86_init(void); diff -r 7968f02ede38 -r 6c975e642719 tools/ioemu/target-i386-dm/helper2.c --- a/tools/ioemu/target-i386-dm/helper2.c Thu Nov 09 16:50:44 2006 +0000 +++ b/tools/ioemu/target-i386-dm/helper2.c Thu Nov 09 17:23:58 2006 +0000 @@ -506,10 +506,10 @@ void cpu_handle_ioreq(void *opaque) /* No state change if state = STATE_IORESP_HOOK */ if (req->state == STATE_IOREQ_INPROCESS) { - mb(); req->state = STATE_IORESP_READY; - } - env->send_event = 1; + xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]); + } else + destroy_hvm_domain(); } } @@ -525,8 +525,6 @@ int main_loop(void) qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock)); qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env); - - env->send_event = 0; while (1) { if (vm_running) { @@ -540,11 +538,6 @@ int main_loop(void) /* Wait up 
to 10 msec. */ main_loop_wait(10); - - if (env->send_event) { - env->send_event = 0; - xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]); - } } destroy_hvm_domain(); return 0; diff -r 7968f02ede38 -r 6c975e642719 xen/arch/ia64/vmx/vmx_support.c --- a/xen/arch/ia64/vmx/vmx_support.c Thu Nov 09 16:50:44 2006 +0000 +++ b/xen/arch/ia64/vmx/vmx_support.c Thu Nov 09 17:23:58 2006 +0000 @@ -49,7 +49,7 @@ void vmx_io_assist(struct vcpu *v) p = &vio->vp_ioreq; if (p->state == STATE_IORESP_READY) { - p->state = STATE_INVALID; + p->state = STATE_IOREQ_NONE; } else { /* Can't block here, for the same reason as other places to @@ -65,7 +65,7 @@ void vmx_send_assist_req(struct vcpu *v) ioreq_t *p; p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq; - if (unlikely(p->state != STATE_INVALID)) { + if (unlikely(p->state != STATE_IOREQ_NONE)) { /* This indicates a bug in the device model. Crash the domain. */ printk("Device model set bad IO state %d.\n", p->state); diff -r 7968f02ede38 -r 6c975e642719 xen/arch/x86/hvm/hvm.c --- a/xen/arch/x86/hvm/hvm.c Thu Nov 09 16:50:44 2006 +0000 +++ b/xen/arch/x86/hvm/hvm.c Thu Nov 09 17:23:58 2006 +0000 @@ -60,10 +60,8 @@ void hvm_stts(struct vcpu *v) void hvm_stts(struct vcpu *v) { /* FPU state already dirty? Then no need to setup_fpu() lazily. 
*/ - if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) ) - return; - - hvm_funcs.stts(v); + if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) ) + hvm_funcs.stts(v); } void hvm_set_guest_time(struct vcpu *v, u64 gtime) @@ -79,34 +77,40 @@ void hvm_do_resume(struct vcpu *v) void hvm_do_resume(struct vcpu *v) { ioreq_t *p; - struct periodic_time *pt = - &v->domain->arch.hvm_domain.pl_time.periodic_tm; + struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm; hvm_stts(v); - /* pick up the elapsed PIT ticks and re-enable pit_timer */ - if ( pt->enabled && v->vcpu_id == pt->bind_vcpu && pt->first_injected ) { - if ( v->arch.hvm_vcpu.guest_time ) { + /* Pick up the elapsed PIT ticks and re-enable pit_timer. */ + if ( pt->enabled && (v->vcpu_id == pt->bind_vcpu) && pt->first_injected ) + { + if ( v->arch.hvm_vcpu.guest_time ) + { hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time); v->arch.hvm_vcpu.guest_time = 0; } pickup_deactive_ticks(pt); } + /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). 
*/ p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq; - wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port, - p->state != STATE_IOREQ_READY && - p->state != STATE_IOREQ_INPROCESS); - switch ( p->state ) - { - case STATE_IORESP_READY: - hvm_io_assist(v); - break; - case STATE_INVALID: - break; - default: - printk("Weird HVM iorequest state %d.\n", p->state); - domain_crash(v->domain); + while ( p->state != STATE_IOREQ_NONE ) + { + switch ( p->state ) + { + case STATE_IORESP_READY: /* IORESP_READY -> NONE */ + hvm_io_assist(v); + break; + case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */ + case STATE_IOREQ_INPROCESS: + wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port, + (p->state != STATE_IOREQ_READY) && + (p->state != STATE_IOREQ_INPROCESS)); + break; + default: + gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state); + domain_crash_synchronous(); + } } } diff -r 7968f02ede38 -r 6c975e642719 xen/arch/x86/hvm/io.c --- a/xen/arch/x86/hvm/io.c Thu Nov 09 16:50:44 2006 +0000 +++ b/xen/arch/x86/hvm/io.c Thu Nov 09 17:23:58 2006 +0000 @@ -736,27 +736,25 @@ void hvm_io_assist(struct vcpu *v) io_opp = &v->arch.hvm_vcpu.io_op; regs = &io_opp->io_context; - - vio = get_vio(v->domain, v->vcpu_id); - - if ( vio == 0 ) { - printk("bad shared page: %lx\n", (unsigned long)vio); + vio = get_vio(v->domain, v->vcpu_id); + + p = &vio->vp_ioreq; + if ( p->state != STATE_IORESP_READY ) + { + gdprintk(XENLOG_ERR, "Unexpected HVM iorequest state %d.\n", p->state); domain_crash_synchronous(); } - p = &vio->vp_ioreq; - - if ( p->state == STATE_IORESP_READY ) { - p->state = STATE_INVALID; - if ( p->type == IOREQ_TYPE_PIO ) - hvm_pio_assist(regs, p, io_opp); - else - hvm_mmio_assist(regs, p, io_opp); - - /* Copy register changes back into current guest state. 
*/ - hvm_load_cpu_guest_regs(v, regs); - memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES); - } + p->state = STATE_IOREQ_NONE; + + if ( p->type == IOREQ_TYPE_PIO ) + hvm_pio_assist(regs, p, io_opp); + else + hvm_mmio_assist(regs, p, io_opp); + + /* Copy register changes back into current guest state. */ + hvm_load_cpu_guest_regs(v, regs); + memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES); } /* diff -r 7968f02ede38 -r 6c975e642719 xen/arch/x86/hvm/platform.c --- a/xen/arch/x86/hvm/platform.c Thu Nov 09 16:50:44 2006 +0000 +++ b/xen/arch/x86/hvm/platform.c Thu Nov 09 17:23:58 2006 +0000 @@ -727,7 +727,7 @@ static void hvm_send_assist_req(struct v ioreq_t *p; p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq; - if ( unlikely(p->state != STATE_INVALID) ) { + if ( unlikely(p->state != STATE_IOREQ_NONE) ) { /* This indicates a bug in the device model. Crash the domain. */ printk("Device model set bad IO state %d.\n", p->state); @@ -760,7 +760,7 @@ void send_pio_req(unsigned long port, un } p = &vio->vp_ioreq; - if ( p->state != STATE_INVALID ) + if ( p->state != STATE_IOREQ_NONE ) printk("WARNING: send pio with something already pending (%d)?\n", p->state); @@ -815,7 +815,7 @@ static void send_mmio_req(unsigned char p = &vio->vp_ioreq; - if ( p->state != STATE_INVALID ) + if ( p->state != STATE_IOREQ_NONE ) printk("WARNING: send mmio with something already pending (%d)?\n", p->state); p->dir = dir; diff -r 7968f02ede38 -r 6c975e642719 xen/include/public/hvm/ioreq.h --- a/xen/include/public/hvm/ioreq.h Thu Nov 09 16:50:44 2006 +0000 +++ b/xen/include/public/hvm/ioreq.h Thu Nov 09 17:23:58 2006 +0000 @@ -27,7 +27,7 @@ #define IOREQ_READ 1 #define IOREQ_WRITE 0 -#define STATE_INVALID 0 +#define STATE_IOREQ_NONE 0 #define STATE_IOREQ_READY 1 #define STATE_IOREQ_INPROCESS 2 #define STATE_IORESP_READY 3 _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |