
[Xen-devel] [PATCH v2 13/17] x86/hvm: only acquire RAM pages for emulation when we need to



If hvmemul_do_io_addr() is called to complete a previously issued
emulation, then there is no need to acquire the RAM pages again. There
is also no need to re-calculate the value of *reps, provided
hvmemul_do_io() updates it when returning X86EMUL_OKAY.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
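(Not part of the patch: an illustrative sketch of the state-gated flow
this change introduces, for reviewers. Page references are only taken
when a fresh ioreq is being started; a completion pass goes straight on
to issue the I/O. The enum values mirror STATE_IOREQ_NONE /
STATE_IORESP_READY, but the helpers acquire_pages() and do_io() are
hypothetical stand-ins, not Xen functions.)

#include <stdio.h>

/* Simplified stand-in for the per-vcpu ioreq state. */
typedef enum {
    IOREQ_NONE,     /* no emulation in flight: start a new one   */
    IORESP_READY,   /* response ready: complete the previous one */
    IOREQ_BUSY      /* anything else is unexpected here          */
} io_state_t;

/* Stand-in for hvmemul_acquire_page() on the page(s) under the access. */
static int acquire_pages(void)
{
    return 0;
}

static int do_io(io_state_t state)
{
    switch ( state )
    {
    case IOREQ_NONE:
        /* New request: take page references and size the rep count. */
        if ( acquire_pages() != 0 )
            return -1;
        break;
    case IORESP_READY:
        /*
         * Completing a previously issued emulation: the pages were
         * already acquired and *reps already sized, so skip straight
         * to the I/O itself.
         */
        break;
    default:
        return -1;      /* X86EMUL_UNHANDLEABLE in the real code */
    }

    /* ... the hvmemul_do_io() equivalent would run here ... */
    return 0;
}

int main(void)
{
    printf("new request: %d, completion: %d\n",
           do_io(IOREQ_NONE), do_io(IORESP_READY));
    return 0;
}
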
 xen/arch/x86/hvm/emulate.c |   83 ++++++++++++++++++++++++--------------------
 1 file changed, 46 insertions(+), 37 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 8dd02af..016bc79 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -142,7 +142,6 @@ static int hvmemul_do_io(
         if ( (p.type != is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO) ||
              (p.addr != addr) ||
              (p.size != size) ||
-             (p.count != *reps) ||
              (p.dir != dir) ||
              (p.df != df) ||
              (p.data_is_ptr != data_is_addr) )
@@ -193,6 +192,7 @@ static int hvmemul_do_io(
             }
         }
 
+        *reps = p.count;
         return X86EMUL_OKAY;
     default:
         /*
@@ -303,60 +303,69 @@ int hvmemul_do_io_addr(
     unsigned int size, uint8_t dir, bool_t df, paddr_t ram_gpa)
 {
     struct vcpu *v = current;
+    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
     unsigned long ram_gmfn = paddr_to_pfn(ram_gpa);
     struct page_info *ram_page[2];
     int nr_pages = 0;
     unsigned long count;
     int rc;
 
+    switch ( vio->io_req.state )
+    {
+    case STATE_IOREQ_NONE:
+        rc = hvmemul_acquire_page(ram_gmfn, &ram_page[nr_pages]);
+        if ( rc != X86EMUL_OKAY )
+            goto out;
 
-    rc = hvmemul_acquire_page(ram_gmfn, &ram_page[nr_pages]);
-    if ( rc != X86EMUL_OKAY )
-        goto out;
+        nr_pages++;
 
-    nr_pages++;
+        /* Determine how many reps will fit within this page */
+        for ( count = 0; count < *reps; count++ )
+        {
+            paddr_t start, end;
 
-    /* Detemine how many reps will fit within this page */
-    for ( count = 0; count < *reps; count++ )
-    {
-        paddr_t start, end;
+            if ( df )
+            {
+                start = ram_gpa - count * size;
+                end = ram_gpa + size - 1;
+            }
+            else
+            {
+                start = ram_gpa;
+                end = ram_gpa + (count + 1) * size - 1;
+            }
 
-        if ( df )
-        {
-            start = ram_gpa - count * size;
-            end = ram_gpa + size - 1;
+            if ( paddr_to_pfn(start) != ram_gmfn ||
+                 paddr_to_pfn(end) != ram_gmfn )
+                break;
         }
-        else
+
+        if ( count == 0 )
         {
-            start = ram_gpa;
-            end = ram_gpa + (count + 1) * size - 1;
+            /*
+             * This access must span two pages, so grab a reference
+             * to the next page and do a single rep.
+             */
+            rc = hvmemul_acquire_page(df ? ram_gmfn - 1 : ram_gmfn + 1,
+                                      &ram_page[nr_pages]);
+            if ( rc != X86EMUL_OKAY )
+                goto out;
+
+            nr_pages++;
+            count = 1;
         }
-
-        if ( paddr_to_pfn(start) != ram_gmfn ||
-             paddr_to_pfn(end) != ram_gmfn )
-            break;
-    }
-
-    if ( count == 0 )
-    {
-        /*
-         * This access must span two pages, so grab a reference to
-         * the next page and do a single rep.
-         */
-        rc = hvmemul_acquire_page(df ? ram_gmfn - 1 : ram_gmfn + 1,
-                                  &ram_page[nr_pages]);
-        if ( rc != X86EMUL_OKAY )
-            goto out;
-
-        nr_pages++;
-        count = 1;
-    }
+        break;
+    case STATE_IORESP_READY:
+        break;
+    default:
+        return X86EMUL_UNHANDLEABLE;
+    }
 
     rc = hvmemul_do_io(is_mmio, addr, &count, size, dir, df, 1,
                        (uint64_t)ram_gpa);
     if ( rc == X86EMUL_OKAY )
     {
-        v->arch.hvm_vcpu.hvm_io.mmio_retry = (count < *reps);
+        vio->mmio_retry = (count < *reps);
         *reps = count;
     }
 
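(Again not part of the patch, just for reviewers: a standalone sketch
of the rep-sizing loop that moves into the STATE_IOREQ_NONE case above,
assuming 4KiB pages. reps_within_page() is a hypothetical helper name,
not a function in the patch.)

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define paddr_to_pfn(pa) ((uint64_t)(pa) >> PAGE_SHIFT)

/* How many reps of 'size' bytes stay within the page holding ram_gpa? */
static unsigned long reps_within_page(uint64_t ram_gpa, unsigned int size,
                                      int df, unsigned long reps)
{
    uint64_t gfn = paddr_to_pfn(ram_gpa);
    unsigned long count;

    for ( count = 0; count < reps; count++ )
    {
        uint64_t start, end;

        if ( df )       /* descending access (DF set), grows downwards */
        {
            start = ram_gpa - count * size;
            end = ram_gpa + size - 1;
        }
        else            /* ascending access, grows upwards */
        {
            start = ram_gpa;
            end = ram_gpa + (count + 1) * size - 1;
        }

        /* Stop as soon as the access would leave the acquired page. */
        if ( paddr_to_pfn(start) != gfn || paddr_to_pfn(end) != gfn )
            break;
    }

    /*
     * count == 0 means even a single rep straddles the page boundary;
     * the patch then acquires the neighbouring page and does one rep.
     */
    return count;
}

int main(void)
{
    /* 4-byte reps starting 8 bytes below a page boundary: only 2 fit. */
    printf("%lu\n", reps_within_page(0x1ff8, 4, 0, 16));
    return 0;
}
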
-- 
1.7.10.4

