
[Xen-devel] [PATCH v2] x86/HVM: correct hvmemul_map_linear_addr() for multi-page case



The function does two translations in one go for a single guest access.
Any failure of the first translation step (guest linear -> guest
physical), resulting in #PF, ought to take precedence over any failure
of the second step (guest physical -> host physical). Bail out of the
loop early only when translation produces HVMTRANS_bad_linear_to_gfn;
otherwise record the most relevant of the perhaps multiple different
errors encountered, and report it once the loop has finished. (The
choice of ZERO_BLOCK_PTR as the "no error yet" sentinel is arbitrary.)

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Add comment (mapping table) and adjust update_map_err()
    accordingly.
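
As an aside (illustrative only, not part of the patch): a minimal
standalone C model of why bailing out on the first per-page failure is
wrong for a multi-page access. A later page may fail the linear -> gfn
step, which has to raise #PF and therefore must take precedence over an
earlier gfn -> mfn failure. All names below are invented, and the model
deliberately collapses the multi-error merge to "last recorded"; the
real patch uses update_map_err() for that part.

#include <assert.h>

enum xlat { XLAT_OK, XLAT_BAD_GFN_TO_MFN, XLAT_BAD_LINEAR_TO_GFN };

/* Old behaviour: stop at the first per-page failure of either step. */
static int old_map(const enum xlat *pages, unsigned int n)
{
    for ( unsigned int i = 0; i < n; i++ )
        if ( pages[i] != XLAT_OK )
            return pages[i];
    return XLAT_OK;
}

/*
 * New behaviour: only linear -> gfn failures (#PF) end the loop early;
 * other failures are recorded and reported after the loop.
 */
static int new_map(const enum xlat *pages, unsigned int n)
{
    int err = XLAT_OK;

    for ( unsigned int i = 0; i < n; i++ )
    {
        if ( pages[i] == XLAT_BAD_LINEAR_TO_GFN )
            return pages[i];        /* #PF takes precedence immediately */
        if ( pages[i] != XLAT_OK )
            err = pages[i];         /* record, but keep scanning */
    }
    return err;
}

int main(void)
{
    /* Page 0 fails the second step (e.g. MMIO), page 1 takes #PF. */
    const enum xlat pages[2] = { XLAT_BAD_GFN_TO_MFN,
                                 XLAT_BAD_LINEAR_TO_GFN };

    assert(old_map(pages, 2) == XLAT_BAD_GFN_TO_MFN);    /* #PF lost */
    assert(new_map(pages, 2) == XLAT_BAD_LINEAR_TO_GFN); /* #PF raised */
    return 0;
}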

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -532,6 +532,36 @@ static int hvmemul_do_mmio_addr(paddr_t
 }
 
 /*
+ * Intended mapping, implemented without table lookup:
+ *
+ * -----------------------------------------
+ * | \ new |       |       |       |       |
+ * |   \   | OKAY  | NULL  | RETRY | UNHND |
+ * | err \ |       |       |       |       |
+ * -----------------------------------------
+ * | OKAY  | OKAY  | NULL  | RETRY | UNHND |
+ * -----------------------------------------
+ * | NULL  | NULL  | NULL  | RETRY | UNHND |
+ * -----------------------------------------
+ * | RETRY | RETRY | RETRY | RETRY | UNHND |
+ * -----------------------------------------
+ * | UNHND | UNHND | UNHND | UNHND | UNHND |
+ * -----------------------------------------
+ */
+static void *update_map_err(void *err, void *new)
+{
+    if ( err == ZERO_BLOCK_PTR || err == ERR_PTR(~X86EMUL_OKAY) ||
+         new == ERR_PTR(~X86EMUL_UNHANDLEABLE) )
+        return new;
+
+    if ( new == ERR_PTR(~X86EMUL_OKAY) ||
+         err == ERR_PTR(~X86EMUL_UNHANDLEABLE) )
+        return err;
+
+    return err ?: new;
+}
+
+/*
  * Map the frame(s) covering an individual linear access, for writeable
  * access.  May return NULL for MMIO, or ERR_PTR(~X86EMUL_*) for other errors
  * including ERR_PTR(~X86EMUL_OKAY) for write-discard mappings.
@@ -544,7 +574,7 @@ static void *hvmemul_map_linear_addr(
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     struct vcpu *curr = current;
-    void *err, *mapping;
+    void *err = ZERO_BLOCK_PTR, *mapping;
     unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) -
         (linear >> PAGE_SHIFT) + 1;
     unsigned int i;
@@ -600,27 +630,28 @@ static void *hvmemul_map_linear_addr(
             goto out;
 
         case HVMTRANS_bad_gfn_to_mfn:
-            err = NULL;
-            goto out;
+            err = update_map_err(err, NULL);
+            continue;
 
         case HVMTRANS_gfn_paged_out:
         case HVMTRANS_gfn_shared:
-            err = ERR_PTR(~X86EMUL_RETRY);
-            goto out;
+            err = update_map_err(err, ERR_PTR(~X86EMUL_RETRY));
+            continue;
 
         default:
-            goto unhandleable;
+            err = update_map_err(err, ERR_PTR(~X86EMUL_UNHANDLEABLE));
+            continue;
         }
 
         *mfn++ = page_to_mfn(page);
 
         if ( p2m_is_discard_write(p2mt) )
-        {
-            err = ERR_PTR(~X86EMUL_OKAY);
-            goto out;
-        }
+            err = update_map_err(err, ERR_PTR(~X86EMUL_OKAY));
     }
 
+    if ( err != ZERO_BLOCK_PTR )
+        goto out;
+
     /* Entire access within a single frame? */
     if ( nr_frames == 1 )
         mapping = map_domain_page(hvmemul_ctxt->mfn[0]);
@@ -639,6 +670,7 @@ static void *hvmemul_map_linear_addr(
     return mapping + (linear & ~PAGE_MASK);
 
  unhandleable:
+    ASSERT(err == ZERO_BLOCK_PTR);
     err = ERR_PTR(~X86EMUL_UNHANDLEABLE);
 
  out:
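
For review convenience, update_map_err() can be checked exhaustively
against the mapping table in plain userspace C. The harness below is a
sketch, not part of the patch: ERR_PTR() and ZERO_BLOCK_PTR are replaced
by stand-ins (Xen's real definitions differ), and the X86EMUL_* values
are assumed; only their mutual distinctness matters for this check.
Build with gcc or clang, since "err ?: new" is a GNU extension (as used
in Xen itself).

#include <assert.h>
#include <stdint.h>

#define X86EMUL_OKAY          0  /* assumed values; only their        */
#define X86EMUL_UNHANDLEABLE  1  /* distinctness matters here         */
#define X86EMUL_RETRY         3

#define ERR_PTR(x)     ((void *)(intptr_t)(x))   /* userspace stand-in */
static char zero_block;
#define ZERO_BLOCK_PTR ((void *)&zero_block)     /* distinct sentinel  */

/* Copied verbatim from the patch above. */
static void *update_map_err(void *err, void *new)
{
    if ( err == ZERO_BLOCK_PTR || err == ERR_PTR(~X86EMUL_OKAY) ||
         new == ERR_PTR(~X86EMUL_UNHANDLEABLE) )
        return new;

    if ( new == ERR_PTR(~X86EMUL_OKAY) ||
         err == ERR_PTR(~X86EMUL_UNHANDLEABLE) )
        return err;

    return err ?: new;
}

int main(void)
{
    /* Row/column order as in the table: OKAY, NULL, RETRY, UNHND. */
    void *vals[4] = {
        ERR_PTR(~X86EMUL_OKAY), NULL,
        ERR_PTR(~X86EMUL_RETRY), ERR_PTR(~X86EMUL_UNHANDLEABLE),
    };
    /* expected[err][new], transcribed cell by cell from the comment. */
    static const int expected[4][4] = {
        { 0, 1, 2, 3 },  /* err == OKAY: the new result always wins  */
        { 1, 1, 2, 3 },  /* err == NULL                              */
        { 2, 2, 2, 3 },  /* err == RETRY                             */
        { 3, 3, 3, 3 },  /* err == UNHND: nothing can override it    */
    };

    for ( unsigned int i = 0; i < 4; i++ )
        for ( unsigned int j = 0; j < 4; j++ )
            assert(update_map_err(vals[i], vals[j]) ==
                   vals[expected[i][j]]);

    /* The sentinel yields to whatever the first real result is. */
    for ( unsigned int j = 0; j < 4; j++ )
        assert(update_map_err(ZERO_BLOCK_PTR, vals[j]) == vals[j]);

    return 0;
}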




