
[Xen-devel] [PATCH 10/17] x86/hvm: remove HVMIO_dispatched I/O state



By removing the HVMIO_dispatched state and making all pending emulations
(i.e. all those not handled by the hypervisor) use HVMIO_awaiting_completion,
various code paths can be simplified.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
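Note (not part of the commit message): below is a minimal standalone sketch,
illustrative only and not Xen code -- the vcpu_io/dispatch/complete names are
invented for the example -- of the state machine this patch leaves behind.
Dispatch unconditionally parks the vcpu in an awaiting-completion state, and
the completion handler alone decides whether the emulator's result must be
propagated back to the guest (reads) or simply discarded (writes).

#include <stdio.h>

enum io_state { IO_NONE, IO_AWAITING_COMPLETION, IO_COMPLETED };
enum io_dir   { IO_READ, IO_WRITE };

struct vcpu_io {
    enum io_state state;
    enum io_dir   dir;
    unsigned long data;
};

/* Dispatch side: no separate "dispatched" state for writes any more;
 * every request sent out to an emulator awaits completion. */
static void dispatch(struct vcpu_io *vio, enum io_dir dir)
{
    vio->state = IO_AWAITING_COMPLETION;
    vio->dir   = dir;
}

/* Completion side: record the result, propagate it only for reads
 * (cf. the io_dir check added to hvm_io_assist()), then go idle. */
static void complete(struct vcpu_io *vio, unsigned long result)
{
    vio->state = IO_COMPLETED;
    vio->data  = result;
    if ( vio->dir == IO_READ )
        printf("copy %#lx back into guest rax\n", vio->data);
    vio->state = IO_NONE;
}

int main(void)
{
    struct vcpu_io vio = { IO_NONE, IO_READ, 0 };

    dispatch(&vio, IO_READ);   /* e.g. an 'in' instruction */
    complete(&vio, 0xabcd);    /* emulator's reply arrives */
    return 0;
}

The point of the sketch: the old HVMIO_dispatched fire-and-forget path for
writes collapses into the common await/complete pair, so only the completion
side needs to care about direction.
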
 xen/arch/x86/hvm/emulate.c     |   15 ++-------------
 xen/arch/x86/hvm/hvm.c         |   35 ++++++++++++++---------------------
 xen/arch/x86/hvm/io.c          |   20 +++++++++-----------
 xen/include/asm-x86/hvm/vcpu.h |    2 +-
 4 files changed, 26 insertions(+), 46 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 18d2401..941a25f 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -139,23 +139,14 @@ static int hvmemul_do_io(
         break;
     case HVMIO_completed:
         vio->io_state = HVMIO_none;
-        if ( data_is_addr || dir == IOREQ_WRITE )
-            return X86EMUL_UNHANDLEABLE;
         goto finish_access;
-    case HVMIO_dispatched:
-        /* May have to wait for previous cycle of a multi-write to complete. */
-        if ( is_mmio && !data_is_addr && (dir == IOREQ_WRITE) &&
-             (addr == (vio->mmio_large_write_pa +
-                       vio->mmio_large_write_bytes)) )
-            return X86EMUL_RETRY;
-        /* fallthrough */
     default:
         return X86EMUL_UNHANDLEABLE;
     }
 
-    vio->io_state = (data_is_addr || dir == IOREQ_WRITE) ?
-        HVMIO_dispatched : HVMIO_awaiting_completion;
+    vio->io_state = HVMIO_awaiting_completion;
     vio->io_size = size;
+    vio->io_dir = dir;
 
     if ( dir == IOREQ_WRITE )
     {
@@ -191,8 +182,6 @@ static int hvmemul_do_io(
             rc = hvm_send_assist_req(s, &p);
             if ( rc != X86EMUL_RETRY )
                 vio->io_state = HVMIO_none;
-            else if ( data_is_addr || dir == IOREQ_WRITE )
-                rc = X86EMUL_OKAY;
         }
         break;
     }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5fef4e7..c18e1a8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -415,41 +415,34 @@ static void hvm_io_assist(ioreq_t *p)
 {
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+    enum hvm_io_completion completion = vio->io_completion;
 
     p->state = STATE_IOREQ_NONE;
 
-    switch ( vio->io_state )
-    {
-    case HVMIO_awaiting_completion:
-    {
-        enum hvm_io_completion completion = vio->io_completion;
+    BUG_ON(vio->io_state != HVMIO_awaiting_completion);
 
-        vio->io_state = HVMIO_completed;
-        vio->io_data = p->data;
-        vio->io_completion = HVMIO_no_completion;
+    vio->io_state = HVMIO_completed;
+    vio->io_data = p->data;
+    vio->io_completion = HVMIO_no_completion;
 
-        switch ( completion )
-        {
-        case HVMIO_mmio_completion:
-            (void)handle_mmio();
-            break;
+    switch ( completion )
+    {
+    case HVMIO_mmio_completion:
+        (void)handle_mmio();
+        break;
 
-        case HVMIO_pio_completion:
+    case HVMIO_pio_completion:
+        if ( vio->io_dir == IOREQ_READ )
+        {
             if ( vio->io_size == 4 ) /* Needs zero extension. */
                 guest_cpu_user_regs()->rax = (uint32_t)p->data;
             else
                 memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
-
-            vio->io_state = HVMIO_none;
-            break;
-        default:
-            break;
         }
 
+        vio->io_state = HVMIO_none;
         break;
-    }
     default:
-        vio->io_state = HVMIO_none;
         break;
     }
 
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index e4b4350..d22c5bf 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -90,19 +90,21 @@ int handle_mmio(void)
 
     rc = hvm_emulate_one(&ctxt);
 
-    if ( rc != X86EMUL_RETRY )
-        vio->io_state = HVMIO_none;
-    if ( vio->io_state == HVMIO_awaiting_completion )
-        vio->io_completion = HVMIO_mmio_completion;
-    else
-        vio->mmio_access = (struct npfec){};
-
     switch ( rc )
     {
+    case X86EMUL_OKAY:
+        vio->mmio_access = (struct npfec){};
+        break;
+    case X86EMUL_RETRY:
+        vio->io_completion = HVMIO_mmio_completion;
+        break;
     case X86EMUL_UNHANDLEABLE:
+        vio->mmio_access = (struct npfec){};
         hvm_dump_emulation_state(XENLOG_G_WARNING "MMIO", &ctxt);
         return 0;
     case X86EMUL_EXCEPTION:
+        vio->io_state = HVMIO_none;
+        vio->mmio_access = (struct npfec){};
         if ( ctxt.exn_pending )
             hvm_inject_trap(&ctxt.trap);
         break;
@@ -154,10 +156,6 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
         }
         break;
     case X86EMUL_RETRY:
-        if ( vio->io_state != HVMIO_awaiting_completion )
-            return 0;
-        /* Completion in hvm_io_assist() with no re-emulation required. */
-        ASSERT(dir == IOREQ_READ);
         vio->io_completion = HVMIO_pio_completion;
         break;
     default:
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index ee5b258..e86197e 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -32,7 +32,6 @@
 
 enum hvm_io_state {
     HVMIO_none = 0,
-    HVMIO_dispatched,
     HVMIO_awaiting_completion,
     HVMIO_completed
 };
@@ -53,6 +52,7 @@ struct hvm_vcpu_io {
     enum hvm_io_state      io_state;
     unsigned long          io_data;
     int                    io_size;
+    int                    io_dir;
     enum hvm_io_completion io_completion;
 
     /*
-- 
1.7.10.4

