
[Xen-changelog] [xen master] x86/HVM: cache emulated instruction for retry processing



commit 699be3096ce62e2d6ef953b2a223c34330598ac8
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Oct 14 09:54:09 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Oct 14 09:54:09 2013 +0200

    x86/HVM: cache emulated instruction for retry processing
    
    Rather than re-reading the instruction bytes when processing a retry,
    stash away and re-use the bytes we already read. That way we can be
    certain that the retry won't do something different from the
    instruction that requested it, getting once again closer to real
    hardware behavior (where a retry is simply a repeated bus operation,
    never a redundant re-decode of the instruction).
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/hvm/emulate.c     |   57 ++++++++++++++++++++++++++++++----------
 xen/include/asm-x86/hvm/vcpu.h |    3 ++
 2 files changed, 46 insertions(+), 14 deletions(-)
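
The mechanism in a nutshell, as a standalone sketch before the diff
itself (identifiers such as insn_cache and fetch_from_guest are
illustrative stand-ins, not the Xen names): any bytes fetched from
guest memory are copied into a small per-emulation buffer, so a
repeated or overlapping fetch is served from that buffer instead of
going back to guest memory.

#include <assert.h>
#include <string.h>

#define OKAY 0

struct insn_cache {
    unsigned int  valid_bytes;   /* how much of buf[] is populated */
    unsigned char buf[16];       /* 16 bytes: the x86 maximum insn length */
};

/*
 * Stand-in for __hvmemul_read(): in Xen this may have to exit to the
 * device model and come back later, which is what triggers retries.
 * Here it just fabricates bytes so the sketch is runnable.
 */
static int fetch_from_guest(unsigned int off, void *dst, unsigned int len)
{
    (void)off;
    memset(dst, 0x90, len);      /* pretend the guest code is all NOPs */
    return OKAY;
}

static int insn_fetch(struct insn_cache *c, unsigned int insn_off,
                      void *p_data, unsigned int bytes)
{
    /* Miss: read from guest memory once, then remember what we saw. */
    if ( insn_off + bytes > c->valid_bytes )
    {
        int rc = fetch_from_guest(insn_off, p_data, bytes);

        if ( rc == OKAY )
        {
            assert(insn_off + bytes <= sizeof(c->buf));
            memcpy(&c->buf[insn_off], p_data, bytes);
            c->valid_bytes = insn_off + bytes;
        }
        return rc;
    }

    /* Hit: a repeated fetch (e.g. on retry) never re-reads guest memory. */
    memcpy(p_data, &c->buf[insn_off], bytes);
    return OKAY;
}
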

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 6cb6678..f39c173 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -569,9 +569,19 @@ static int hvmemul_insn_fetch(
 
     /* Fall back if requested bytes are not in the prefetch cache. */
     if ( unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) )
-        return __hvmemul_read(
-            seg, offset, p_data, bytes,
-            hvm_access_insn_fetch, hvmemul_ctxt);
+    {
+        int rc = __hvmemul_read(seg, offset, p_data, bytes,
+                                hvm_access_insn_fetch, hvmemul_ctxt);
+
+        if ( rc == X86EMUL_OKAY )
+        {
+            ASSERT(insn_off + bytes <= sizeof(hvmemul_ctxt->insn_buf));
+            memcpy(&hvmemul_ctxt->insn_buf[insn_off], p_data, bytes);
+            hvmemul_ctxt->insn_buf_bytes = insn_off + bytes;
+        }
+
+        return rc;
+    }
 
     /* Hit the cache. Simple memcpy. */
     memcpy(p_data, &hvmemul_ctxt->insn_buf[insn_off], bytes);
@@ -1148,17 +1158,27 @@ int hvm_emulate_one(
         pfec |= PFEC_user_mode;
 
     hvmemul_ctxt->insn_buf_eip = regs->eip;
-    hvmemul_ctxt->insn_buf_bytes =
-        hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf)
-        ? :
-        (hvm_virtual_to_linear_addr(
-            x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs],
-            regs->eip, sizeof(hvmemul_ctxt->insn_buf),
-            hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) &&
-         !hvm_fetch_from_guest_virt_nofault(
-             hvmemul_ctxt->insn_buf, addr,
-             sizeof(hvmemul_ctxt->insn_buf), pfec))
-        ? sizeof(hvmemul_ctxt->insn_buf) : 0;
+    if ( !vio->mmio_insn_bytes )
+    {
+        hvmemul_ctxt->insn_buf_bytes =
+            hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf) ?:
+            (hvm_virtual_to_linear_addr(x86_seg_cs,
+                                        &hvmemul_ctxt->seg_reg[x86_seg_cs],
+                                        regs->eip,
+                                        sizeof(hvmemul_ctxt->insn_buf),
+                                        hvm_access_insn_fetch,
+                                        hvmemul_ctxt->ctxt.addr_size,
+                                        &addr) &&
+             hvm_fetch_from_guest_virt_nofault(hvmemul_ctxt->insn_buf, addr,
+                                               sizeof(hvmemul_ctxt->insn_buf),
+                                               pfec) == HVMCOPY_okay) ?
+            sizeof(hvmemul_ctxt->insn_buf) : 0;
+    }
+    else
+    {
+        hvmemul_ctxt->insn_buf_bytes = vio->mmio_insn_bytes;
+        memcpy(hvmemul_ctxt->insn_buf, vio->mmio_insn, vio->mmio_insn_bytes);
+    }
 
     hvmemul_ctxt->exn_pending = 0;
     vio->mmio_retrying = vio->mmio_retry;
@@ -1169,7 +1189,16 @@ int hvm_emulate_one(
     if ( rc == X86EMUL_OKAY && vio->mmio_retry )
         rc = X86EMUL_RETRY;
     if ( rc != X86EMUL_RETRY )
+    {
         vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
+        vio->mmio_insn_bytes = 0;
+    }
+    else
+    {
+        BUILD_BUG_ON(sizeof(vio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf));
+        vio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes;
+        memcpy(vio->mmio_insn, hvmemul_ctxt->insn_buf, vio->mmio_insn_bytes);
+    }
 
     if ( rc != X86EMUL_OKAY )
         return rc;
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 1abb364..8fa5452 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -66,6 +66,9 @@ struct hvm_vcpu_io {
     /* We may write up to m256 as a number of device-model transactions. */
     unsigned int mmio_large_write_bytes;
     paddr_t mmio_large_write_pa;
+    /* For retries we shouldn't re-fetch the instruction. */
+    unsigned int mmio_insn_bytes;
+    unsigned char mmio_insn[16];
     /*
      * For string instruction emulation we need to be able to signal a
      * necessary retry through other than function return codes.
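
Across exits the cached bytes have to live somewhere that outlasts the
emulation context, which is what the two new hvm_vcpu_io fields above
provide. A sketch of that round-trip, under the same illustrative
naming (only mmio_insn and mmio_insn_bytes follow the hunk above):

#include <string.h>

#define X86EMUL_OKAY  0
#define X86EMUL_RETRY 1

/* Mirrors the two fields added to struct hvm_vcpu_io above. */
struct vcpu_io {
    unsigned int  mmio_insn_bytes;
    unsigned char mmio_insn[16];
};

/* Illustrative stand-in for the emulation context's fetch buffer. */
struct emul_ctxt {
    unsigned int  insn_buf_bytes;
    unsigned char insn_buf[16];
};

static void seed_insn_buf(struct emul_ctxt *ctxt, const struct vcpu_io *vio)
{
    if ( vio->mmio_insn_bytes )
    {
        /* Retrying: start from exactly the bytes the first pass decoded. */
        ctxt->insn_buf_bytes = vio->mmio_insn_bytes;
        memcpy(ctxt->insn_buf, vio->mmio_insn, vio->mmio_insn_bytes);
    }
    else
        ctxt->insn_buf_bytes = 0;  /* first pass: fetch from the guest */
}

static void finish_emulation(struct vcpu_io *vio,
                             const struct emul_ctxt *ctxt, int rc)
{
    if ( rc == X86EMUL_RETRY )
    {
        /* Park the bytes so the retry can't observe different code. */
        vio->mmio_insn_bytes = ctxt->insn_buf_bytes;
        memcpy(vio->mmio_insn, ctxt->insn_buf, ctxt->insn_buf_bytes);
    }
    else
        vio->mmio_insn_bytes = 0;  /* emulation completed: drop the cache */
}

The BUILD_BUG_ON in the patch guards the equivalent of the memcpy in
finish_emulation(): mmio_insn must be at least as large as insn_buf,
so the copy can never truncate.
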
--
generated by git-patchbot for /home/xen/git/xen.git#master
