
[Xen-devel] [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper



... as a central place to check whether the translation for a given
linear address is both available and usable.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v3: Split from subsequent patch.
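
For illustration only, a minimal standalone sketch (not Xen code and not part
of this patch) of the dispatch rule the new helper centralizes: a write access
is checked first, then an insn fetch, and anything else is treated as a read,
with the cached GLA compared page-aligned. All names and values below
(struct mmio_cache, known_gla_sketch, PFEC_WRITE, PFEC_INSN_FETCH,
SKETCH_PAGE_MASK) are simplified stand-ins, not Xen's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_MASK   (~0xfffUL)   /* 4k pages, illustration only */
#define PFEC_WRITE         (1u << 1)    /* stand-in for PFEC_write_access */
#define PFEC_INSN_FETCH    (1u << 4)    /* stand-in for PFEC_insn_fetch */

/* Simplified stand-in for the relevant hvm_vcpu_io / mmio_access state. */
struct mmio_cache {
    unsigned long gla;                  /* cached guest linear address */
    bool read_access, write_access, insn_fetch;
};

static bool known_gla_sketch(const struct mmio_cache *c,
                             unsigned long addr, uint32_t pfec)
{
    /* Decode the access type: write first, then insn fetch, else read. */
    if ( pfec & PFEC_WRITE )
    {
        if ( !c->write_access )
            return false;
    }
    else if ( pfec & PFEC_INSN_FETCH )
    {
        if ( !c->insn_fetch )
            return false;
    }
    else if ( !c->read_access )
        return false;

    /* The cache covers a single page, so compare page-aligned addresses. */
    return c->gla == (addr & SKETCH_PAGE_MASK);
}

int main(void)
{
    struct mmio_cache c = { .gla = 0x10000, .write_access = true };

    printf("%d\n", known_gla_sketch(&c, 0x10008, PFEC_WRITE)); /* 1: cached write page */
    printf("%d\n", known_gla_sketch(&c, 0x10008, 0));          /* 0: reads not cached  */
    printf("%d\n", known_gla_sketch(&c, 0x20000, PFEC_WRITE)); /* 0: different page    */
    return 0;
}

Built with a plain C toolchain this prints 1, 0, 0: only an access whose type
was recorded in the cache and whose linear address falls on the cached page
is treated as known.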

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1041,6 +1041,26 @@ static inline int hvmemul_linear_mmio_wr
                                       pfec, hvmemul_ctxt, translate);
 }
 
+static bool known_gla(unsigned long addr, unsigned int bytes, uint32_t pfec)
+{
+    const struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+
+    if ( pfec & PFEC_write_access )
+    {
+        if ( !vio->mmio_access.write_access )
+            return false;
+    }
+    else if ( pfec & PFEC_insn_fetch )
+    {
+        if ( !vio->mmio_access.insn_fetch )
+            return false;
+    }
+    else if ( !vio->mmio_access.read_access )
+        return false;
+
+    return vio->mmio_gla == (addr & PAGE_MASK);
+}
+
 static int __hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
@@ -1049,11 +1069,9 @@ static int __hvmemul_read(
     enum hvm_access_type access_type,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
-    struct vcpu *curr = current;
     pagefault_info_t pfinfo;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
     int rc;
 
     if ( is_x86_system_segment(seg) )
@@ -1067,10 +1085,7 @@ static int __hvmemul_read(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
-    if ( ((access_type != hvm_access_insn_fetch
-           ? vio->mmio_access.read_access
-           : vio->mmio_access.insn_fetch)) &&
-         (vio->mmio_gla == (addr & PAGE_MASK)) )
+    if ( known_gla(addr, bytes, pfec) )
         return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
     rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
@@ -1171,10 +1186,8 @@ static int hvmemul_write(
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-    struct vcpu *curr = current;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
     int rc;
     void *mapping;
 
@@ -1188,8 +1201,7 @@ static int hvmemul_write(
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
 
-    if ( vio->mmio_access.write_access &&
-         (vio->mmio_gla == (addr & PAGE_MASK)) )
+    if ( known_gla(addr, bytes, pfec) )
         return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
     mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
@@ -1218,7 +1230,6 @@ static int hvmemul_rmw(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
     int rc;
     void *mapping;
 
@@ -1244,8 +1255,7 @@ static int hvmemul_rmw(
     else
     {
         unsigned long data = 0;
-        bool known_gpfn = vio->mmio_access.write_access &&
-                          vio->mmio_gla == (addr & PAGE_MASK);
+        bool known_gpfn = known_gla(addr, bytes, pfec);
 
         if ( bytes > sizeof(data) )
             return X86EMUL_UNHANDLEABLE;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel