
Re: [Xen-devel] [PATCH v2 08/12] x86/altp2m: alternate p2m memory events.

On Mon, Jun 22, 2015 at 2:56 PM, Ed White <edmund.h.white@xxxxxxxxx> wrote:
Add a flag to indicate that a memory event occurred in an alternate p2m
and a field containing the p2m index. Allow the response to switch to
a different p2m using the same flag and field.

Modify p2m_access_check() to handle alternate p2m's.

Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
---
 xen/arch/x86/mm/p2m.c         | 20 +++++++++++++++++++-
 xen/include/asm-arm/p2m.h     |  7 +++++++
 xen/include/asm-x86/p2m.h     |  4 ++++
 xen/include/public/vm_event.h | 13 ++++++++++++-
 xen/include/xen/mem_access.h  |  1 +
 5 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 87b4b75..389360a 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1516,6 +1516,13 @@ void p2m_mem_access_emulate_check(struct vcpu *v,
     }
 }

+void p2m_mem_access_altp2m_check(struct vcpu *v, const vm_event_response_t *rsp)
+{
+    if ( (rsp->flags & MEM_ACCESS_ALTERNATE_P2M) &&
+         altp2mhvm_active(v->domain) )
+        p2m_switch_vcpu_altp2m_by_id(v, rsp->u.mem_access.altp2m_idx);
+}

The function should be renamed p2m_altp2m_check, as using mem_access is not actually required in order to use altp2m. See my comment below.
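For illustration only (not code from this series): assuming the flag were made a generic response flag, say VM_EVENT_FLAG_ALTERNATE_P2M, and the index were reachable outside the mem_access sub-struct, the renamed check could look roughly like this:

    /* Hypothetical sketch: switch this vCPU's view for any event type,
     * not only mem_access. Flag name and index location are assumed. */
    void p2m_altp2m_check(struct vcpu *v, const vm_event_response_t *rsp)
    {
        if ( (rsp->flags & VM_EVENT_FLAG_ALTERNATE_P2M) &&
             altp2mhvm_active(v->domain) )
            p2m_switch_vcpu_altp2m_by_id(v, rsp->altp2m_idx);
    }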
 
+
 bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
                             struct npfec npfec,
                             vm_event_request_t **req_ptr)
@@ -1523,7 +1530,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
     struct vcpu *v = current;
     unsigned long gfn = gpa >> PAGE_SHIFT;
     struct domain *d = v->domain;
-    struct p2m_domain* p2m = p2m_get_hostp2m(d);
+    struct p2m_domain *p2m = NULL;
     mfn_t mfn;
     p2m_type_t p2mt;
     p2m_access_t p2ma;
@@ -1531,6 +1538,11 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
     int rc;
     unsigned long eip = guest_cpu_user_regs()->eip;

+    if ( altp2mhvm_active(d) )
+        p2m = p2m_get_altp2m(v);
+    if ( !p2m )
+        p2m = p2m_get_hostp2m(d);
+
     /* First, handle rx2rw conversion automatically.
      * These calls to p2m->set_entry() must succeed: we have the gfn
      * locked and just did a successful get_entry(). */
@@ -1637,6 +1649,12 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
         req->vcpu_id = v->vcpu_id;

         p2m_vm_event_fill_regs(req);
+
+        if ( altp2mhvm_active(v->domain) )
+        {
+            req->flags |= MEM_ACCESS_ALTERNATE_P2M;
+            req->u.mem_access.altp2m_idx = vcpu_altp2mhvm(v).p2midx;
+        }
     }

     /* Pause the current VCPU */
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 63748ef..b31dd6f 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -109,6 +109,13 @@ void p2m_mem_access_emulate_check(struct vcpu *v,
     /* Not supported on ARM. */
 }

+static inline
+void p2m_mem_access_altp2m_check(struct vcpu *v,
+                                const vm_event_response_t *rsp)
+{
+    /* Not supported on ARM. */
+}
+
 #define p2m_is_foreign(_t)  ((_t) == p2m_map_foreign)
 #define p2m_is_ram(_t)      ((_t) == p2m_ram_rw || (_t) == p2m_ram_ro)

diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 16fd523..d84da33 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -762,6 +762,10 @@ bool_t p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp, unsigned long *i
 /* Switch alternate p2m for a single vcpu */
 bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, uint16_t idx);

+/* Check to see if vcpu should be switched to a different p2m. */
+void p2m_mem_access_altp2m_check(struct vcpu *v,
+                                 const vm_event_response_t *rsp);
+
 /*
  * p2m type to IOMMU flags
  */
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 577e971..b492f65 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -149,13 +149,24 @@ struct vm_event_regs_x86 {
  * potentially having side effects (like memory mapped or port I/O) disabled.
  */
 #define MEM_ACCESS_EMULATE_NOWRITE      (1 << 7)
+/*
+ * This flag can be set in a request or a response
+ *
+ * On a request, indicates that the event occurred in the alternate p2m specified by
+ * the altp2m_idx request field.
+ *
+ * On a response, indicates that the VCPU should resume in the alternate p2m specified
+ * by the altp2m_idx response field if possible.
+ */
+#define MEM_ACCESS_ALTERNATE_P2M        (1 << 8)

This definition should be renamed VM_EVENT_FLAG_ALTERNATE_P2M and moved to the appropriate location. It should also be checked for all events, not just mem_access, similar to how VM_EVENT_FLAG_VCPU_PAUSED is checked, as we might want to switch views in response to a variety of events. Right now I work around this by specifying the response to a singlestep event as if it were a response to a mem_access one, but that's very hackish.
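Very roughly, and just to illustrate the idea (flag name and bit position are assumptions, not taken from this series), the definition would sit next to the other generic flags and be checked once in the common response path, alongside the paused check:

    #define VM_EVENT_FLAG_VCPU_PAUSED     (1 << 0)  /* existing */
    #define VM_EVENT_FLAG_ALTERNATE_P2M   (1 << 1)  /* assumed bit position */

    /* in the generic response handling, for every event type: */
    if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
        vm_event_vcpu_unpause(v);
    if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
        p2m_altp2m_check(v, &rsp);

That way a singlestep (or any other) response could request a view switch directly, without pretending to be a mem_access response.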
 

 struct vm_event_mem_access {
     uint64_t gfn;
     uint64_t offset;
     uint64_t gla;   /* if flags has MEM_ACCESS_GLA_VALID set */
     uint32_t flags; /* MEM_ACCESS_* */
-    uint32_t _pad;
+    uint16_t altp2m_idx; /* may be used during request and response */
+    uint16_t _pad;
 };

 struct vm_event_write_ctrlreg {
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
index f60b727..4d3d5ca 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -36,6 +36,7 @@ static inline
 void mem_access_resume(struct vcpu *v, vm_event_response_t *rsp)
 {
     p2m_mem_access_emulate_check(v, rsp);
+    p2m_mem_access_altp2m_check(v, rsp);
 }

 #else
--
1.9.1
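On the consumer side, with the structures as defined in the patch above, a monitor that wants the vCPU to resume in a different view would fill in its response along these lines (sketch only; view_idx stands for whatever view the tool has created):

    rsp.flags |= MEM_ACCESS_ALTERNATE_P2M;
    rsp.u.mem_access.altp2m_idx = view_idx;

which is exactly the kind of response a non-mem_access event should also be able to give, hence the renaming suggestion above.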




--
www.novetta.com
Tamas K Lengyel
Senior Security Researcher
7921 Jones Branch Drive
McLean VA 22102
Email: tlengyel@novetta.com
