
[Xen-devel] [PATCH 11/11] x86/altp2m: alternate p2m memory events.



Add a flag to indicate that a memory event occurred in an alternate p2m
and a field containing the p2m index. Allow the response to switch to
a different p2m using the same flag and field.

Modify p2m_mem_access_check() to handle alternate p2ms; access_required is
always assumed to be set when an alternate p2m is active.
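
For illustration, a listener (along the lines of the xen-access test tool)
could request a view switch in its response roughly as below. This is a
minimal sketch against the interface added here, not part of the patch; the
get_request()/put_response() helpers and the view index are placeholders for
whatever ring accessors and policy the listener already uses:

    #include <string.h>                     /* memset */
    #include <xen/mem_event.h>              /* public mem_event interface */

    /*
     * Listener-side sketch (hypothetical): acknowledge an access event and
     * ask Xen to resume the faulting VCPU in alternate p2m view 1.
     */
    static void respond_switch_view(mem_event_back_ring_t *ring)
    {
        mem_event_request_t req;
        mem_event_response_t rsp;

        get_request(ring, &req);            /* assumed ring helper */

        memset(&rsp, 0, sizeof(rsp));
        rsp.vcpu_id = req.vcpu_id;
        rsp.flags = req.flags & MEM_EVENT_FLAG_VCPU_PAUSED;

        /* New in this patch: flag the switch and name the target view. */
        rsp.flags |= MEM_EVENT_FLAG_ALTERNATE_P2M;
        rsp.altp2m_idx = 1;

        put_response(ring, &rsp);           /* assumed ring helper */
    }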

Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
---
 xen/arch/x86/mm/hap/altp2m_hap.c | 53 ++++++++++++++++++++++++++++++++++++++--
 xen/arch/x86/mm/p2m.c            | 18 ++++++++++++--
 xen/common/mem_access.c          |  1 +
 xen/include/asm-arm/p2m.h        |  7 ++++++
 xen/include/asm-x86/p2m.h        |  4 +++
 xen/include/public/mem_event.h   |  9 +++++++
 6 files changed, 88 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/mm/hap/altp2m_hap.c b/xen/arch/x86/mm/hap/altp2m_hap.c
index b889626..dd56bbc 100644
--- a/xen/arch/x86/mm/hap/altp2m_hap.c
+++ b/xen/arch/x86/mm/hap/altp2m_hap.c
@@ -19,6 +19,7 @@
  */
 
 #include <xen/mem_event.h>
+#include <xen/mem_access.h>
 #include <xen/event.h>
 #include <public/mem_event.h>
 #include <asm/domain.h>
@@ -63,8 +64,9 @@ altp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
  *     else indicate that outer handler should handle fault
  *
  * If the fault is for a present entry:
- *     if the page type is not p2m_ram_rw_ve crash domain
- *     else if hardware does not support #VE emulate it and retry
+ *     if the page type is p2m_ram_rw_ve and hardware does not support #VE,
+ *      emulate #VE and retry if emulation succeeds
+ *     else try to send a memory event
  *     else crash domain
  */
 
@@ -90,11 +92,58 @@ altp2mhvm_hap_nested_page_fault(struct vcpu *v, paddr_t gpa,
 
     if ( mfn_valid(mfn) )
     {
+        bool_t violation;
+        mem_event_request_t *req_ptr = NULL;
+
         /* Should #VE be emulated for this fault? */
         if ( p2mt == p2m_ram_rw_ve && !cpu_has_vmx_virt_exceptions &&
              ahvm_vcpu_emulate_ve(v) )
             return ALTP2MHVM_PAGEFAULT_DONE;
 
+        /* Fault not handled yet, so try for mem_event */
+        switch (p2ma)
+        {
+        case p2m_access_n:
+        case p2m_access_n2rwx:
+        default:
+            violation = npfec.read_access || npfec.write_access || npfec.insn_fetch;
+            break;
+        case p2m_access_r:
+            violation = npfec.write_access || npfec.insn_fetch;
+            break;
+        case p2m_access_w:
+            violation = npfec.read_access || npfec.insn_fetch;
+            break;
+        case p2m_access_x:
+            violation = npfec.read_access || npfec.write_access;
+            break;
+        case p2m_access_rx:
+        case p2m_access_rx2rw:
+            violation = npfec.write_access;
+            break;
+        case p2m_access_wx:
+            violation = npfec.read_access;
+            break;
+        case p2m_access_rw:
+            violation = npfec.insn_fetch;
+            break;
+        case p2m_access_rwx:
+            violation = 0;
+            break;
+        }
+
+        if ( violation )
+        {
+            p2m_mem_access_check(gpa, gla, npfec, &req_ptr);
+
+            if ( req_ptr )
+            {
+                mem_access_send_req(v->domain, req_ptr);
+                xfree(req_ptr);
+                return ALTP2MHVM_PAGEFAULT_DONE;
+            }
+        }
+
         /* Could not handle fault here */
         gdprintk(XENLOG_INFO, "Altp2m memory access permissions failure, "
                               "no mem_event listener VCPU %d, dom %d\n",
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 843a433..d296c8f 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1486,6 +1486,13 @@ void p2m_mem_event_emulate_check(struct vcpu *v, const mem_event_response_t *rsp
     }
 }
 
+void p2m_mem_event_altp2m_check(struct vcpu *v, const mem_event_response_t *rsp)
+{
+    if ( (rsp->flags & MEM_EVENT_FLAG_ALTERNATE_P2M) &&
+         altp2mhvm_active(v->domain) )
+        p2m_switch_vcpu_altp2m_by_id(v, rsp->altp2m_idx);
+}
+
 void p2m_setup_introspection(struct domain *d)
 {
     if ( hvm_funcs.enable_msr_exit_interception )
@@ -1502,7 +1509,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
     struct vcpu *v = current;
     unsigned long gfn = gpa >> PAGE_SHIFT;
     struct domain *d = v->domain;    
-    struct p2m_domain* p2m = p2m_get_hostp2m(d);
+    struct p2m_domain *p2m = altp2mhvm_active(v->domain) ?
+        p2m_get_altp2m(v) : p2m_get_hostp2m(d);
     mfn_t mfn;
     p2m_type_t p2mt;
     p2m_access_t p2ma;
@@ -1536,7 +1544,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
     if ( !mem_event_check_ring(&d->mem_event->access) || !req_ptr ) 
     {
         /* No listener */
-        if ( p2m->access_required ) 
+        if ( p2m->access_required || altp2mhvm_active(v->domain) )
         {
             gdprintk(XENLOG_INFO, "Memory access permissions failure, "
                                   "no mem_event listener VCPU %d, dom %d\n",
@@ -1612,6 +1620,12 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
         req->vcpu_id = v->vcpu_id;
 
         p2m_mem_event_fill_regs(req);
+
+        if ( altp2mhvm_active(v->domain) )
+        {
+            req->flags |= MEM_EVENT_FLAG_ALTERNATE_P2M;
+            req->altp2m_idx = vcpu_altp2mhvm(v).p2midx;
+        }
     }
 
     /* Pause the current VCPU */
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index d8aac5f..223d048 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -48,6 +48,7 @@ void mem_access_resume(struct domain *d)
         v = d->vcpu[rsp.vcpu_id];
 
         p2m_mem_event_emulate_check(v, &rsp);
+        p2m_mem_event_altp2m_check(v, &rsp);
 
         /* Unpause domain. */
         if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index da36504..c838f26 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -78,6 +78,13 @@ void p2m_mem_event_emulate_check(struct vcpu *v,
 };
 
 static inline
+void p2m_mem_event_altp2m_check(struct vcpu *v,
+                                 const mem_event_response_t *rsp)
+{
+    /* Not supported on ARM. */
+};
+
+static inline
 void p2m_setup_introspection(struct domain *d)
 {
     /* No special setup on ARM. */
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 52588ed..e4bc64f 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -737,6 +737,10 @@ bool_t p2m_change_altp2m_pfn(struct domain *d, uint16_t idx,
 /* Invalidate a page in all alternate p2m's */
 void p2m_remove_altp2m_page(struct domain *d, unsigned long gfn);
 
+/* Check to see if vcpu should be switched to a different p2m. */
+void p2m_mem_event_altp2m_check(struct vcpu *v,
+                                 const mem_event_response_t *rsp);
+
 /*
  * p2m type to IOMMU flags
  */
diff --git a/xen/include/public/mem_event.h b/xen/include/public/mem_event.h
index 599f9e8..b877899 100644
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -47,6 +47,14 @@
  * potentially having side effects (like memory mapped or port I/O) disabled.
  */
 #define MEM_EVENT_FLAG_EMULATE_NOWRITE (1 << 6)
+/*
+ * On a request, indicates that the event occurred in the alternate p2m specified by
+ * the altp2m_idx request field.
+ *
+ * On a response, indicates that the VCPU should resume in the alternate p2m specified
+ * by the altp2m_idx response field if possible.
+ */
+#define MEM_EVENT_FLAG_ALTERNATE_P2M   (1 << 7)
 
 /* Reasons for the memory event request */
 #define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */
@@ -117,6 +125,7 @@ typedef struct mem_event_st {
 
     uint16_t reason;
     struct mem_event_regs_x86 x86_regs;
+    uint16_t altp2m_idx;
 } mem_event_request_t, mem_event_response_t;
 
 DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t);
-- 
1.9.1



 

