
[Xen-devel] [PATCH v2 12/17] x86/hvm: use ioreq_t to track in-flight state



Use an ioreq_t rather than the open-coded state, size, dir and data
fields in struct hvm_vcpu_io. This also allows PIO completion to be
handled similarly to MMIO completion, by re-issuing the handle_pio()
call.
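
For illustration, the change to the in-flight tracking state amounts to
the following (a sketch abbreviated from the struct hvm_vcpu_io hunk at
the end of this patch; all other fields elided):

    /* Before: open-coded fields, one per property of the request. */
    uint8_t                io_state;   /* STATE_IOREQ_* */
    unsigned long          io_data;
    int                    io_size;
    int                    io_dir;

    /* After: a single ioreq_t carries state, addr, size, dir, data,
     * type, count and df, so the whole request can be replayed on
     * completion. */
    ioreq_t                io_req;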

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c       |  149 +++++++++++++++++++++-----------------
 xen/arch/x86/hvm/hvm.c           |   19 ++---
 xen/arch/x86/hvm/io.c            |    2 +-
 xen/arch/x86/hvm/svm/nestedsvm.c |    2 +-
 xen/arch/x86/hvm/vmx/realmode.c  |    6 +-
 xen/include/asm-x86/hvm/vcpu.h   |    5 +-
 6 files changed, 93 insertions(+), 90 deletions(-)
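
As a rough sketch of the resulting PIO completion flow (usual emulation
entry points assumed; error paths elided):

    /*
     * 1. handle_pio() -> hvmemul_do_io(): the request is recorded in
     *    vio->io_req with state STATE_IOREQ_READY and forwarded to the
     *    device model; emulation returns X86EMUL_RETRY.
     * 2. The device model completes the request; hvm_io_assist() saves
     *    the result in vio->io_req.data and sets the state to
     *    STATE_IORESP_READY.
     * 3. For HVMIO_pio_completion, hvm_io_assist() re-issues
     *    handle_pio(vio->io_req.addr, vio->io_req.size,
     *    vio->io_req.dir); the re-executed hvmemul_do_io() sees
     *    STATE_IORESP_READY, verifies that the replayed request matches
     *    the recorded one, consumes the data and resets the state to
     *    STATE_IOREQ_NONE.
     */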

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 1c34288..8dd02af 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -93,6 +93,7 @@ static int hvmemul_do_io(
         .df = df,
         .data = data,
         .data_is_ptr = data_is_addr, /* ioreq_t field name is misleading */
+        .state = STATE_IOREQ_READY,
     };
     void *p_data = (void *)data;
     int rc;
@@ -130,21 +131,79 @@ static int hvmemul_do_io(
         }
     }
 
-    switch ( vio->io_state )
+    switch ( vio->io_req.state )
     {
     case STATE_IOREQ_NONE:
+        vio->io_req = p;
         break;
     case STATE_IORESP_READY:
-        vio->io_state = STATE_IOREQ_NONE;
-        goto finish_access;
+        p = vio->io_req;
+
+        if ( (p.type != (is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO)) ||
+             (p.addr != addr) ||
+             (p.size != size) ||
+             (p.count != *reps) ||
+             (p.dir != dir) ||
+             (p.df != df) ||
+             (p.data_is_ptr != data_is_addr) )
+        {
+            gdprintk(XENLOG_WARNING, "mismatched request\n");
+            domain_crash(curr->domain);
+
+            vio->io_req.state = STATE_IOREQ_NONE;
+            return X86EMUL_UNHANDLEABLE;
+        }
+
+ resp_ready:
+        vio->io_req.state = STATE_IOREQ_NONE;
+
+        if ( dir == IOREQ_READ )
+        {
+            hvmtrace_io_assist(is_mmio, &p);
+
+            if ( !data_is_addr )
+                memcpy(p_data, &p.data, size);
+        }
+
+        if ( is_mmio && !data_is_addr )
+        {
+            /* Part of a multi-cycle read or write? */
+            if ( dir == IOREQ_WRITE )
+            {
+                paddr_t pa = vio->mmio_large_write_pa;
+                unsigned int bytes = vio->mmio_large_write_bytes;
+                if ( bytes == 0 )
+                    pa = vio->mmio_large_write_pa = addr;
+                if ( addr == (pa + bytes) )
+                    vio->mmio_large_write_bytes += size;
+            }
+            else
+            {
+                paddr_t pa = vio->mmio_large_read_pa;
+                unsigned int bytes = vio->mmio_large_read_bytes;
+                if ( bytes == 0 )
+                    pa = vio->mmio_large_read_pa = addr;
+                if ( (addr == (pa + bytes)) &&
+                     ((bytes + size) <= sizeof(vio->mmio_large_read)) )
+                {
+                    memcpy(&vio->mmio_large_read[addr - pa], p_data,
+                           size);
+                    vio->mmio_large_read_bytes += size;
+                }
+            }
+        }
+
+        return X86EMUL_OKAY;
     default:
+        /*
+         * This function should never be called unless
+         * vio->io_req.state matches the above cases.
+         */
+        gdprintk(XENLOG_WARNING, "bad emulation state\n");
+        domain_crash(curr->domain);
         return X86EMUL_UNHANDLEABLE;
     }
 
-    vio->io_state = STATE_IOREQ_READY;
-    vio->io_size = size;
-    vio->io_dir = dir;
-
     if ( dir == IOREQ_WRITE )
     {
         if ( !data_is_addr )
@@ -155,77 +214,31 @@ static int hvmemul_do_io(
 
     rc = hvm_io_intercept(&p);
 
-    switch ( rc )
-    {
-    case X86EMUL_OKAY:
-        vio->io_data = p.data;
-        vio->io_state = STATE_IOREQ_NONE;
-        break;
-    case X86EMUL_UNHANDLEABLE:
+    if ( rc == X86EMUL_UNHANDLEABLE )
     {
         struct hvm_ioreq_server *s =
             hvm_select_ioreq_server(curr->domain, &p);
 
         /* If there is no suitable backing DM, just ignore accesses */
-        if ( !s )
-        {
-            rc = process_io_intercept(curr, &p, &null_handler);
-            if ( rc == X86EMUL_OKAY )
-                vio->io_data = p.data;
-            vio->io_state = STATE_IOREQ_NONE;
-        }
-        else
-        {
-            rc = hvm_send_assist_req(s, &p);
-            if ( rc != X86EMUL_RETRY )
-                vio->io_state = STATE_IOREQ_NONE;
-        }
-        break;
-    }
-    default:
-        BUG();
+        rc = !s ?
+            process_io_intercept(curr, &p, &null_handler) :
+            hvm_send_assist_req(s, &p);
     }
 
-    if ( rc != X86EMUL_OKAY )
-        return rc;
-
- finish_access:
-    if ( dir == IOREQ_READ )
-    {
-        hvmtrace_io_assist(is_mmio, &p);
-
-        if ( !data_is_addr )
-            memcpy(p_data, &vio->io_data, size);
-    }
-
-    if ( is_mmio && !data_is_addr )
+    switch ( rc )
     {
-        /* Part of a multi-cycle read or write? */
-        if ( dir == IOREQ_WRITE )
-        {
-            paddr_t pa = vio->mmio_large_write_pa;
-            unsigned int bytes = vio->mmio_large_write_bytes;
-            if ( bytes == 0 )
-                pa = vio->mmio_large_write_pa = addr;
-            if ( addr == (pa + bytes) )
-                vio->mmio_large_write_bytes += size;
-        }
-        else
-        {
-            paddr_t pa = vio->mmio_large_read_pa;
-            unsigned int bytes = vio->mmio_large_read_bytes;
-            if ( bytes == 0 )
-                pa = vio->mmio_large_read_pa = addr;
-            if ( (addr == (pa + bytes)) &&
-                 ((bytes + size) <= sizeof(vio->mmio_large_read)) )
-            {
-                memcpy(&vio->mmio_large_read[bytes], p_data, size);
-                vio->mmio_large_read_bytes += size;
-            }
-        }
+    case X86EMUL_OKAY:
+        goto resp_ready;
+    case X86EMUL_UNHANDLEABLE:
+        vio->io_req.state = STATE_IOREQ_NONE;
+        break;
+    case X86EMUL_RETRY:
+        break;
+    default:
+        BUG();
     }
 
-    return X86EMUL_OKAY;
+    return rc;
 }
 
 int hvmemul_do_io_buffer(
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 85944f6..48711ab 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -423,11 +423,11 @@ static void hvm_io_assist(ioreq_t *p)
      * This function should never be called unless an I/O emulation
-     * is awating completion.
+     * is awaiting completion.
      */
-    if (vio->io_state != STATE_IOREQ_READY)
+    if (vio->io_req.state != STATE_IOREQ_READY)
         domain_crash(curr->domain);
 
-    vio->io_state = STATE_IORESP_READY;
-    vio->io_data = p->data;
+    vio->io_req.state = STATE_IORESP_READY;
+    vio->io_req.data = p->data;
     vio->io_completion = HVMIO_no_completion;
 
     switch ( completion )
@@ -437,15 +437,8 @@ static void hvm_io_assist(ioreq_t *p)
         break;
 
     case HVMIO_pio_completion:
-        if ( vio->io_dir == IOREQ_READ )
-        {
-            if ( vio->io_size == 4 ) /* Needs zero extension. */
-                guest_cpu_user_regs()->rax = (uint32_t)p->data;
-            else
-                memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
-        }
-
-        vio->io_state = STATE_IOREQ_NONE;
+        (void)handle_pio(vio->io_req.addr, vio->io_req.size,
+                         vio->io_req.dir);
         break;
     default:
         break;
@@ -455,7 +448,7 @@ static void hvm_io_assist(ioreq_t *p)
-     * Re-emulation may have scheduled another I/O so io_state set
-     * at the top of the function may have changed.
+     * Re-emulation may have scheduled another I/O so io_req.state set
+     * at the top of the function may have changed.
      */
-    if ( vio->io_state == STATE_IOREQ_NONE )
+    if ( vio->io_req.state == STATE_IOREQ_NONE )
     {
         msix_write_completion(curr);
         vcpu_end_shutdown_deferral(curr);
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index b09b369..e31164e 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -103,7 +103,7 @@ int handle_mmio(void)
         hvm_dump_emulation_state(XENLOG_G_WARNING "MMIO", &ctxt);
         return 0;
     case X86EMUL_EXCEPTION:
-        vio->io_state = STATE_IOREQ_NONE;
+        vio->io_req.state = STATE_IOREQ_NONE;
         vio->mmio_access = (struct npfec){};
         if ( ctxt.exn_pending )
             hvm_inject_trap(&ctxt.trap);
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 8b165c6..78667a2 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1231,7 +1231,7 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
          * Delay the injection because this would result in delivering
          * an interrupt *within* the execution of an instruction.
          */
-        if ( v->arch.hvm_vcpu.hvm_io.io_state != STATE_IOREQ_NONE )
+        if ( v->arch.hvm_vcpu.hvm_io.io_req.state != STATE_IOREQ_NONE )
             return hvm_intblk_shadow;
 
         if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) {
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 8c2da9a..69c0297 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -177,7 +177,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
 
     hvm_emulate_prepare(&hvmemul_ctxt, regs);
 
-    if ( vio->io_state == STATE_IORESP_READY )
+    if ( vio->io_req.state == STATE_IORESP_READY )
         realmode_emulate_one(&hvmemul_ctxt);
 
     /* Only deliver interrupts into emulated real mode. */
@@ -191,7 +191,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
     curr->arch.hvm_vmx.vmx_emulate = 1;
     while ( curr->arch.hvm_vmx.vmx_emulate &&
             !softirq_pending(smp_processor_id()) &&
-            (vio->io_state == STATE_IOREQ_NONE) )
+            (vio->io_req.state == STATE_IOREQ_NONE) )
     {
         /*
          * Check for pending interrupts only every 16 instructions, because
@@ -216,7 +216,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
     }
 
     /* Need to emulate next time if we've started an IO operation */
-    if ( vio->io_state != STATE_IOREQ_NONE )
+    if ( vio->io_req.state != STATE_IOREQ_NONE )
         curr->arch.hvm_vmx.vmx_emulate = 1;
 
     if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 1c2ec27..46e89e4 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -43,10 +43,7 @@ struct hvm_vcpu_asid {
 
 struct hvm_vcpu_io {
     /* I/O request in flight to device model. */
-    uint8_t                io_state;
-    unsigned long          io_data;
-    int                    io_size;
-    int                    io_dir;
+    ioreq_t                io_req;
     enum hvm_io_completion io_completion;
 
     /*
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
