[Xen-devel] [PATCH v5 12/16] x86/hvm: remove HVMIO_dispatched I/O state
By removing the HVMIO_dispatched state and making all pending emulations
(i.e. all those not handled by the hypervisor) use
HVMIO_awaiting_completion, various code paths can be simplified.

The completion case for HVMIO_dispatched can also be trivially removed
from hvmemul_do_io() as it was already unreachable: that state was only
ever used for writes or I/O to/from a guest page, and hvmemul_do_io() is
never called to complete such I/O.

NOTE: There is one subtlety in handle_pio()... The only case in which
      handle_pio() got a return code of X86EMUL_RETRY back from
      hvmemul_do_pio_buffer() and found io_state was not
      HVMIO_awaiting_completion was when the domain is shutting down.
      This is because all writes normally yield a return of X86EMUL_OKAY
      and all reads put io_state into HVMIO_awaiting_completion. Hence
      the io_state check there is replaced with a check of the
      is_shutting_down flag on the domain.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c      | 12 +++---------
 xen/arch/x86/hvm/hvm.c          | 12 +++---------
 xen/arch/x86/hvm/io.c           | 14 +++++++-------
 xen/arch/x86/hvm/vmx/realmode.c |  2 +-
 xen/include/asm-x86/hvm/vcpu.h  | 10 +++++++++-
 5 files changed, 23 insertions(+), 27 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index e17e6ab..a6c7f66 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -137,20 +137,14 @@ static int hvmemul_do_io(
         if ( data_is_addr || dir == IOREQ_WRITE )
             return X86EMUL_UNHANDLEABLE;
         goto finish_access;
-    case HVMIO_dispatched:
-        /* May have to wait for previous cycle of a multi-write to complete. */
-        if ( is_mmio && !data_is_addr && (dir == IOREQ_WRITE) &&
-             (addr == (vio->mmio_large_write_pa +
-                       vio->mmio_large_write_bytes)) )
-            return X86EMUL_RETRY;
-        /* fallthrough */
     default:
         return X86EMUL_UNHANDLEABLE;
     }
 
-    vio->io_state = (data_is_addr || dir == IOREQ_WRITE) ?
-                    HVMIO_dispatched : HVMIO_awaiting_completion;
+    vio->io_state = HVMIO_awaiting_completion;
     vio->io_size = size;
+    vio->io_dir = dir;
+    vio->io_data_is_addr = data_is_addr;
 
     if ( dir == IOREQ_WRITE )
     {
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0f0730e..b94a9b6 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -416,22 +416,16 @@ static void hvm_io_assist(ioreq_t *p)
 {
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    enum hvm_io_state io_state;
 
     p->state = STATE_IOREQ_NONE;
 
-    io_state = vio->io_state;
-    vio->io_state = HVMIO_none;
-
-    switch ( io_state )
+    if ( hvm_vcpu_io_need_completion(vio) )
     {
-    case HVMIO_awaiting_completion:
         vio->io_state = HVMIO_completed;
         vio->io_data = p->data;
-        break;
-    default:
-        break;
     }
+    else
+        vio->io_state = HVMIO_none;
 
     msix_write_completion(curr);
     vcpu_end_shutdown_deferral(curr);
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 221d05e..3b51d59 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -90,9 +90,7 @@ int handle_mmio(void)
 
     rc = hvm_emulate_one(&ctxt);
 
-    if ( rc != X86EMUL_RETRY )
-        vio->io_state = HVMIO_none;
-    if ( vio->io_state == HVMIO_awaiting_completion || vio->mmio_retry )
+    if ( hvm_vcpu_io_need_completion(vio) || vio->mmio_retry )
         vio->io_completion = HVMIO_mmio_completion;
     else
         vio->mmio_access = (struct npfec){};
@@ -142,6 +140,9 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
 
     rc = hvmemul_do_pio_buffer(port, size, dir, &data);
 
+    if ( hvm_vcpu_io_need_completion(vio) )
+        vio->io_completion = HVMIO_pio_completion;
+
     switch ( rc )
     {
     case X86EMUL_OKAY:
@@ -154,11 +155,10 @@
         }
         break;
     case X86EMUL_RETRY:
-        if ( vio->io_state != HVMIO_awaiting_completion )
+        /* We should not advance RIP/EIP if the domain is shutting down */
+        if ( curr->domain->is_shutting_down )
             return 0;
-        /* Completion in hvm_io_assist() with no re-emulation required. */
-        ASSERT(dir == IOREQ_READ);
-        vio->io_completion = HVMIO_pio_completion;
+        break;
     default:
         gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 76ff9a5..deb53ae 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -111,7 +111,7 @@ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
 
     rc = hvm_emulate_one(hvmemul_ctxt);
 
-    if ( vio->io_state == HVMIO_awaiting_completion || vio->mmio_retry )
+    if ( hvm_vcpu_io_need_completion(vio) || vio->mmio_retry )
         vio->io_completion = HVMIO_realmode_completion;
 
     if ( rc == X86EMUL_UNHANDLEABLE )
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index efb373f..13e7eb9 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -32,7 +32,6 @@
 enum hvm_io_state {
     HVMIO_none = 0,
-    HVMIO_dispatched,
     HVMIO_awaiting_completion,
     HVMIO_completed
 };
@@ -55,6 +54,8 @@ struct hvm_vcpu_io {
     unsigned long       io_data;
     unsigned int        io_size;
     enum hvm_io_completion io_completion;
+    uint8_t             io_dir;
+    uint8_t             io_data_is_addr;
 
     /*
      * HVM emulation:
@@ -87,6 +88,13 @@ struct hvm_vcpu_io {
     const struct g2m_ioport *g2m_ioport;
 };
 
+static inline bool_t hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
+{
+    return (vio->io_state == HVMIO_awaiting_completion) &&
+           !vio->io_data_is_addr &&
+           (vio->io_dir == IOREQ_READ);
+}
+
 #define VMCX_EADDR    (~0ULL)
 
 struct nestedvcpu {
--
1.7.10.4
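To make the new predicate's behaviour concrete, here is a minimal standalone sketch (not part of the patch, and only an illustration): it mirrors hvm_vcpu_io_need_completion() against a cut-down hvm_vcpu_io so its truth table can be exercised outside the Xen tree. The IOREQ_READ/IOREQ_WRITE values match Xen's public hvm/ioreq.h; the test harness and its variable names are hypothetical.

/*
 * Standalone sketch of the predicate this patch adds to
 * xen/include/asm-x86/hvm/vcpu.h. The struct keeps only the fields the
 * predicate reads; IOREQ_* values are as in Xen's public hvm/ioreq.h.
 */
#include <stdint.h>
#include <stdio.h>

#define IOREQ_WRITE 0
#define IOREQ_READ  1

enum hvm_io_state {
    HVMIO_none = 0,
    HVMIO_awaiting_completion,
    HVMIO_completed
};

struct hvm_vcpu_io {
    enum hvm_io_state io_state;
    uint8_t io_dir;           /* IOREQ_READ or IOREQ_WRITE */
    uint8_t io_data_is_addr;  /* data goes to/from a guest page */
};

/* Same logic as the new inline in vcpu.h (bool_t becomes int here). */
static inline int hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
{
    return (vio->io_state == HVMIO_awaiting_completion) &&
           !vio->io_data_is_addr &&
           (vio->io_dir == IOREQ_READ);
}

int main(void)
{
    /* Pending read into a register: the only case needing completion. */
    struct hvm_vcpu_io read_reg  = { HVMIO_awaiting_completion, IOREQ_READ,  0 };
    /* Pending write: hvmemul_do_io() is never re-entered to finish it. */
    struct hvm_vcpu_io write_io  = { HVMIO_awaiting_completion, IOREQ_WRITE, 0 };
    /* Pending read whose data lands directly in a guest page. */
    struct hvm_vcpu_io read_page = { HVMIO_awaiting_completion, IOREQ_READ,  1 };

    printf("read->register:   %d\n", hvm_vcpu_io_need_completion(&read_reg));  /* 1 */
    printf("write:            %d\n", hvm_vcpu_io_need_completion(&write_io));  /* 0 */
    printf("read->guest page: %d\n", hvm_vcpu_io_need_completion(&read_page)); /* 0 */
    return 0;
}

Only the first case reports 1, matching the commit message: writes and guest-page transfers are dispatched without hvmemul_do_io() ever being called again to complete them.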